+
+#include "llimits.h"
+#include "lua.h"
+
+
+/* tags for values visible from Lua */
+#define LAST_TAG LUA_TTHREAD
+
+#define NUM_TAGS (LAST_TAG+1)
+
+
+/*
+** Extra tags for non-values
+*/
+#define LUA_TPROTO (LAST_TAG+1)
+#define LUA_TUPVAL (LAST_TAG+2)
+#define LUA_TDEADKEY (LAST_TAG+3)
+
+
+/*
+** Union of all collectable objects
+*/
+typedef union GCObject GCObject;
+
+
+/*
+** Common Header for all collectable objects (in macro form, to be
+** included in other objects)
+*/
+#define CommonHeader GCObject *next; lu_byte tt; lu_byte marked
+
+
+/*
+** Common header in struct form
+*/
+typedef struct GCheader {
+ CommonHeader;
+} GCheader;
+
+
+
+
+/*
+** Union of all Lua values
+*/
+typedef union {
+ GCObject *gc;
+ void *p;
+ lua_Number n;
+ int b;
+} Value;
+
+
+/*
+** Tagged Values
+*/
+
+#define TValuefields Value value; int tt
+
+typedef struct lua_TValue {
+ TValuefields;
+} TValue;
+
+
+/* Macros to test type */
+#define ttisnil(o) (ttype(o) == LUA_TNIL)
+#define ttisnumber(o) (ttype(o) == LUA_TNUMBER)
+#define ttisstring(o) (ttype(o) == LUA_TSTRING)
+#define ttistable(o) (ttype(o) == LUA_TTABLE)
+#define ttisfunction(o) (ttype(o) == LUA_TFUNCTION)
+#define ttisboolean(o) (ttype(o) == LUA_TBOOLEAN)
+#define ttisuserdata(o) (ttype(o) == LUA_TUSERDATA)
+#define ttisthread(o) (ttype(o) == LUA_TTHREAD)
+#define ttislightuserdata(o) (ttype(o) == LUA_TLIGHTUSERDATA)
+
+/* Macros to access values */
+#define ttype(o) ((o)->tt)
+#define gcvalue(o) check_exp(iscollectable(o), (o)->value.gc)
+#define pvalue(o) check_exp(ttislightuserdata(o), (o)->value.p)
+#define nvalue(o) check_exp(ttisnumber(o), (o)->value.n)
+#define rawtsvalue(o) check_exp(ttisstring(o), &(o)->value.gc->ts)
+#define tsvalue(o) (&rawtsvalue(o)->tsv)
+#define rawuvalue(o) check_exp(ttisuserdata(o), &(o)->value.gc->u)
+#define uvalue(o) (&rawuvalue(o)->uv)
+#define clvalue(o) check_exp(ttisfunction(o), &(o)->value.gc->cl)
+#define hvalue(o) check_exp(ttistable(o), &(o)->value.gc->h)
+#define bvalue(o) check_exp(ttisboolean(o), (o)->value.b)
+#define thvalue(o) check_exp(ttisthread(o), &(o)->value.gc->th)
+
+#define l_isfalse(o) (ttisnil(o) || (ttisboolean(o) && bvalue(o) == 0))
+
+/*
+** for internal debug only
+*/
+#define checkconsistency(obj) \
+ lua_assert(!iscollectable(obj) || (ttype(obj) == (obj)->value.gc->gch.tt))
+
+#define checkliveness(g,obj) \
+ lua_assert(!iscollectable(obj) || \
+ ((ttype(obj) == (obj)->value.gc->gch.tt) && !isdead(g, (obj)->value.gc)))
+
+
+/* Macros to set values */
+#define setnilvalue(obj) ((obj)->tt=LUA_TNIL)
+
+#define setnvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.n=(x); i_o->tt=LUA_TNUMBER; }
+
+#define setpvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.p=(x); i_o->tt=LUA_TLIGHTUSERDATA; }
+
+#define setbvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.b=(x); i_o->tt=LUA_TBOOLEAN; }
+
+#define setsvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TSTRING; \
+ checkliveness(G(L),i_o); }
+
+#define setuvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TUSERDATA; \
+ checkliveness(G(L),i_o); }
+
+#define setthvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTHREAD; \
+ checkliveness(G(L),i_o); }
+
+#define setclvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TFUNCTION; \
+ checkliveness(G(L),i_o); }
+
+#define sethvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTABLE; \
+ checkliveness(G(L),i_o); }
+
+#define setptvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TPROTO; \
+ checkliveness(G(L),i_o); }
+
+
+
+
+#define setobj(L,obj1,obj2) \
+ { const TValue *o2=(obj2); TValue *o1=(obj1); \
+ o1->value = o2->value; o1->tt=o2->tt; \
+ checkliveness(G(L),o1); }
+
+
+/*
+** different types of sets, according to destination
+*/
+
+/* from stack to (same) stack */
+#define setobjs2s setobj
+/* to stack (not from same stack) */
+#define setobj2s setobj
+#define setsvalue2s setsvalue
+#define sethvalue2s sethvalue
+#define setptvalue2s setptvalue
+/* from table to same table */
+#define setobjt2t setobj
+/* to table */
+#define setobj2t setobj
+/* to new object */
+#define setobj2n setobj
+#define setsvalue2n setsvalue
+
+#define setttype(obj, tt) (ttype(obj) = (tt))
+
+
+#define iscollectable(o) (ttype(o) >= LUA_TSTRING)
+
+
+
+typedef TValue *StkId; /* index to stack elements */
+
+
+/*
+** String headers for string table
+*/
+typedef union TString {
+ L_Umaxalign dummy; /* ensures maximum alignment for strings */
+ struct {
+ CommonHeader;
+ lu_byte reserved;
+ unsigned int hash;
+ size_t len;
+ } tsv;
+} TString;
+
+
+#define getstr(ts) cast(const char *, (ts) + 1)
+#define svalue(o) getstr(tsvalue(o))
+
+
+
+typedef union Udata {
+ L_Umaxalign dummy; /* ensures maximum alignment for `local' udata */
+ struct {
+ CommonHeader;
+ struct Table *metatable;
+ struct Table *env;
+ size_t len;
+ } uv;
+} Udata;
+
+
+
+
+/*
+** Function Prototypes
+*/
+typedef struct Proto {
+ CommonHeader;
+ TValue *k; /* constants used by the function */
+ Instruction *code;
+ struct Proto **p; /* functions defined inside the function */
+ int *lineinfo; /* map from opcodes to source lines */
+ struct LocVar *locvars; /* information about local variables */
+ TString **upvalues; /* upvalue names */
+ TString *source;
+ int sizeupvalues;
+ int sizek; /* size of `k' */
+ int sizecode;
+ int sizelineinfo;
+ int sizep; /* size of `p' */
+ int sizelocvars;
+ int linedefined;
+ int lastlinedefined;
+ GCObject *gclist;
+ lu_byte nups; /* number of upvalues */
+ lu_byte numparams;
+ lu_byte is_vararg;
+ lu_byte maxstacksize;
+} Proto;
+
+
+/* masks for new-style vararg */
+#define VARARG_HASARG 1
+#define VARARG_ISVARARG 2
+#define VARARG_NEEDSARG 4
+
+
+typedef struct LocVar {
+ TString *varname;
+ int startpc; /* first point where variable is active */
+ int endpc; /* first point where variable is dead */
+} LocVar;
+
+
+
+/*
+** Upvalues
+*/
+
+typedef struct UpVal {
+ CommonHeader;
+ TValue *v; /* points to stack or to its own value */
+ union {
+ TValue value; /* the value (when closed) */
+ struct { /* double linked list (when open) */
+ struct UpVal *prev;
+ struct UpVal *next;
+ } l;
+ } u;
+} UpVal;
+
+
+/*
+** Closures
+*/
+
+#define ClosureHeader \
+ CommonHeader; lu_byte isC; lu_byte nupvalues; GCObject *gclist; \
+ struct Table *env
+
+typedef struct CClosure {
+ ClosureHeader;
+ lua_CFunction f;
+ TValue upvalue[1];
+} CClosure;
+
+
+typedef struct LClosure {
+ ClosureHeader;
+ struct Proto *p;
+ UpVal *upvals[1];
+} LClosure;
+
+
+typedef union Closure {
+ CClosure c;
+ LClosure l;
+} Closure;
+
+
+#define iscfunction(o) (ttype(o) == LUA_TFUNCTION && clvalue(o)->c.isC)
+#define isLfunction(o) (ttype(o) == LUA_TFUNCTION && !clvalue(o)->c.isC)
+
+
+/*
+** Tables
+*/
+
+typedef union TKey {
+ struct {
+ TValuefields;
+ struct Node *next; /* for chaining */
+ } nk;
+ TValue tvk;
+} TKey;
+
+
+typedef struct Node {
+ TValue i_val;
+ TKey i_key;
+} Node;
+
+
+typedef struct Table {
+ CommonHeader;
+  lu_byte flags;  /* 1<<p means tagmethod(p) is present */
+  lu_byte lsizenode;  /* log2 of size of `node' array */
+  struct Table *metatable;
+  TValue *array;  /* array part */
+  Node *node;
+  Node *lastfree;  /* any free position is before this position */
+  GCObject *gclist;
+  int sizearray;  /* size of `array' array */
+} Table;
+
+
+/*
+** `module' operation for hashing (size is always a power of 2)
+*/
+#define lmod(s,size) \
+  (check_exp((size&(size-1))==0, (cast(int, (s) & ((size)-1)))))
+
+
+#define twoto(x) (1<<(x))
+#define sizenode(t) (twoto((t)->lsizenode))
+
+
+#define luaO_nilobject (&luaO_nilobject_)
+
+LUAI_DATA const TValue luaO_nilobject_;
+
+#define ceillog2(x) (luaO_log2((x)-1) + 1)
+
+LUAI_FUNC int luaO_log2 (unsigned int x);
+LUAI_FUNC int luaO_int2fb (unsigned int x);
+LUAI_FUNC int luaO_fb2int (int x);
+LUAI_FUNC int luaO_rawequalObj (const TValue *t1, const TValue *t2);
+LUAI_FUNC int luaO_str2d (const char *s, lua_Number *result);
+LUAI_FUNC const char *luaO_pushvfstring (lua_State *L, const char *fmt,
+ va_list argp);
+LUAI_FUNC const char *luaO_pushfstring (lua_State *L, const char *fmt, ...);
+LUAI_FUNC void luaO_chunkid (char *out, const char *source, size_t len);
+
+
+#endif
+
diff --git a/deps/lua/src/lopcodes.c b/deps/lua/src/lopcodes.c
new file mode 100644
index 0000000000000000000000000000000000000000..bf9cd522c260aa7a36ff76e19fc1a330d19b6e9c
--- /dev/null
+++ b/deps/lua/src/lopcodes.c
@@ -0,0 +1,102 @@
+/*
+** $Id: lopcodes.c,v 1.37 2005/11/08 19:45:36 roberto Exp $
+** See Copyright Notice in lua.h
+*/
+
+
+#define lopcodes_c
+#define LUA_CORE
+
+
+#include "lopcodes.h"
+
+
+/* ORDER OP */
+
+const char *const luaP_opnames[NUM_OPCODES+1] = {
+ "MOVE",
+ "LOADK",
+ "LOADBOOL",
+ "LOADNIL",
+ "GETUPVAL",
+ "GETGLOBAL",
+ "GETTABLE",
+ "SETGLOBAL",
+ "SETUPVAL",
+ "SETTABLE",
+ "NEWTABLE",
+ "SELF",
+ "ADD",
+ "SUB",
+ "MUL",
+ "DIV",
+ "MOD",
+ "POW",
+ "UNM",
+ "NOT",
+ "LEN",
+ "CONCAT",
+ "JMP",
+ "EQ",
+ "LT",
+ "LE",
+ "TEST",
+ "TESTSET",
+ "CALL",
+ "TAILCALL",
+ "RETURN",
+ "FORLOOP",
+ "FORPREP",
+ "TFORLOOP",
+ "SETLIST",
+ "CLOSE",
+ "CLOSURE",
+ "VARARG",
+ NULL
+};
+
+
+#define opmode(t,a,b,c,m) (((t)<<7) | ((a)<<6) | ((b)<<4) | ((c)<<2) | (m))
+
+const lu_byte luaP_opmodes[NUM_OPCODES] = {
+/* T A B C mode opcode */
+ opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_MOVE */
+ ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_LOADK */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_LOADBOOL */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LOADNIL */
+ ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_GETUPVAL */
+ ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_GETGLOBAL */
+ ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_GETTABLE */
+ ,opmode(0, 0, OpArgK, OpArgN, iABx) /* OP_SETGLOBAL */
+ ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_SETUPVAL */
+ ,opmode(0, 0, OpArgK, OpArgK, iABC) /* OP_SETTABLE */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_NEWTABLE */
+ ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_SELF */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_ADD */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_SUB */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MUL */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_DIV */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MOD */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_POW */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_UNM */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_NOT */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LEN */
+ ,opmode(0, 1, OpArgR, OpArgR, iABC) /* OP_CONCAT */
+ ,opmode(0, 0, OpArgR, OpArgN, iAsBx) /* OP_JMP */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_EQ */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LT */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LE */
+ ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TEST */
+ ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TESTSET */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_CALL */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_TAILCALL */
+ ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_RETURN */
+ ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORLOOP */
+ ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORPREP */
+ ,opmode(1, 0, OpArgN, OpArgU, iABC) /* OP_TFORLOOP */
+ ,opmode(0, 0, OpArgU, OpArgU, iABC) /* OP_SETLIST */
+ ,opmode(0, 0, OpArgN, OpArgN, iABC) /* OP_CLOSE */
+ ,opmode(0, 1, OpArgU, OpArgN, iABx) /* OP_CLOSURE */
+ ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_VARARG */
+};
+
diff --git a/deps/lua/src/lopcodes.h b/deps/lua/src/lopcodes.h
new file mode 100644
index 0000000000000000000000000000000000000000..2834b1d74dadee7625025aeb6746bfdff404229f
--- /dev/null
+++ b/deps/lua/src/lopcodes.h
@@ -0,0 +1,268 @@
+/*
+** $Id: lopcodes.h,v 1.124 2005/12/02 18:42:08 roberto Exp $
+** Opcodes for Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lopcodes_h
+#define lopcodes_h
+
+#include "llimits.h"
+
+
+/*===========================================================================
+ We assume that instructions are unsigned numbers.
+ All instructions have an opcode in the first 6 bits.
+ Instructions can have the following fields:
+ `A' : 8 bits
+ `B' : 9 bits
+ `C' : 9 bits
+ `Bx' : 18 bits (`B' and `C' together)
+ `sBx' : signed Bx
+
+ A signed argument is represented in excess K; that is, the number
+ value is the unsigned value minus K. K is exactly the maximum value
+ for that argument (so that -max is represented by 0, and +max is
+ represented by 2*max), which is half the maximum for the corresponding
+ unsigned argument.
+===========================================================================*/
+
+
+enum OpMode {iABC, iABx, iAsBx}; /* basic instruction format */
+
+
+/*
+** size and position of opcode arguments.
+*/
+#define SIZE_C 9
+#define SIZE_B 9
+#define SIZE_Bx (SIZE_C + SIZE_B)
+#define SIZE_A 8
+
+#define SIZE_OP 6
+
+#define POS_OP 0
+#define POS_A (POS_OP + SIZE_OP)
+#define POS_C (POS_A + SIZE_A)
+#define POS_B (POS_C + SIZE_C)
+#define POS_Bx POS_C
+
+
+/*
+** limits for opcode arguments.
+** we use (signed) int to manipulate most arguments,
+** so they must fit in LUAI_BITSINT-1 bits (-1 for sign)
+*/
+#if SIZE_Bx < LUAI_BITSINT-1
+#define MAXARG_Bx ((1<<SIZE_Bx)-1)
+#define MAXARG_sBx (MAXARG_Bx>>1) /* `sBx' is signed */
+#else
+#define MAXARG_Bx MAX_INT
+#define MAXARG_sBx MAX_INT
+#endif
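+/*
+** A worked example (for illustration; the numbers follow from the sizes
+** defined above): with SIZE_Bx == 18, MAXARG_Bx == (1<<18)-1 == 262143
+** and MAXARG_sBx == 262143>>1 == 131071, so a signed offset of -1 is
+** stored in the Bx field as 131070 and a stored 131071 decodes to 0
+** (see GETARG_sBx/SETARG_sBx below).
+*/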
+
+
+#define MAXARG_A ((1<<SIZE_A)-1)
+#define MAXARG_B ((1<<SIZE_B)-1)
+#define MAXARG_C ((1<<SIZE_C)-1)
+
+
+/* creates a mask with `n' 1 bits at position `p' */
+#define MASK1(n,p) ((~((~(Instruction)0)<<n))<<p)
+
+/* creates a mask with `n' 0 bits at position `p' */
+#define MASK0(n,p) (~MASK1(n,p))
+
+/*
+** the following macros help to manipulate instructions
+*/
+
+#define GET_OPCODE(i) (cast(OpCode, ((i)>>POS_OP) & MASK1(SIZE_OP,0)))
+#define SET_OPCODE(i,o) ((i) = (((i)&MASK0(SIZE_OP,POS_OP)) | \
+ ((cast(Instruction, o)<<POS_OP)&MASK1(SIZE_OP,POS_OP))))
+
+#define GETARG_A(i) (cast(int, ((i)>>POS_A) & MASK1(SIZE_A,0)))
+#define SETARG_A(i,u) ((i) = (((i)&MASK0(SIZE_A,POS_A)) | \
+ ((cast(Instruction, u)<<POS_A)&MASK1(SIZE_A,POS_A))))
+
+#define GETARG_B(i) (cast(int, ((i)>>POS_B) & MASK1(SIZE_B,0)))
+#define SETARG_B(i,b) ((i) = (((i)&MASK0(SIZE_B,POS_B)) | \
+ ((cast(Instruction, b)<<POS_B)&MASK1(SIZE_B,POS_B))))
+
+#define GETARG_C(i) (cast(int, ((i)>>POS_C) & MASK1(SIZE_C,0)))
+#define SETARG_C(i,b) ((i) = (((i)&MASK0(SIZE_C,POS_C)) | \
+ ((cast(Instruction, b)<<POS_C)&MASK1(SIZE_C,POS_C))))
+
+#define GETARG_Bx(i) (cast(int, ((i)>>POS_Bx) & MASK1(SIZE_Bx,0)))
+#define SETARG_Bx(i,b) ((i) = (((i)&MASK0(SIZE_Bx,POS_Bx)) | \
+ ((cast(Instruction, b)<<POS_Bx)&MASK1(SIZE_Bx,POS_Bx))))
+
+#define GETARG_sBx(i) (GETARG_Bx(i)-MAXARG_sBx)
+#define SETARG_sBx(i,b) SETARG_Bx((i),cast(int, (b)+MAXARG_sBx))
+
+
+#define CREATE_ABC(o,a,b,c) ((cast(Instruction, o)<<POS_OP) \
+ | (cast(Instruction, a)<<POS_A) \
+ | (cast(Instruction, b)<<POS_B) \
+ | (cast(Instruction, c)<<POS_C))
+
+#define CREATE_ABx(o,a,bc) ((cast(Instruction, o)<<POS_OP) \
+ | (cast(Instruction, a)<<POS_A) \
+ | (cast(Instruction, bc)<<POS_Bx))
+
+
+/*
+** Macros to operate RK indices
+*/
+
+/* this bit 1 means constant (0 means register) */
+#define BITRK (1 << (SIZE_B - 1))
+
+/* test whether value is a constant */
+#define ISK(x) ((x) & BITRK)
+
+/* gets the index of the constant */
+#define INDEXK(r) ((int)(r) & ~BITRK)
+
+#define MAXINDEXRK (BITRK - 1)
+
+/* code a constant index as a RK value */
+#define RKASK(x) ((x) | BITRK)
+
+
+/*
+** invalid register that fits in 8 bits
+*/
+#define NO_REG MAXARG_A
+
+
+/*
+** R(x) - register
+** Kst(x) - constant (in constant table)
+** RK(x) == if ISK(x) then Kst(INDEXK(x)) else R(x)
+*/
+
+
+/*
+** grep "ORDER OP" if you change these enums
+*/
+
+typedef enum {
+/*----------------------------------------------------------------------
+name args description
+------------------------------------------------------------------------*/
+OP_MOVE,/* A B R(A) := R(B) */
+OP_LOADK,/* A Bx R(A) := Kst(Bx) */
+OP_LOADBOOL,/* A B C R(A) := (Bool)B; if (C) pc++ */
+OP_LOADNIL,/* A B R(A) := ... := R(B) := nil */
+OP_GETUPVAL,/* A B R(A) := UpValue[B] */
+
+OP_GETGLOBAL,/* A Bx R(A) := Gbl[Kst(Bx)] */
+OP_GETTABLE,/* A B C R(A) := R(B)[RK(C)] */
+
+OP_SETGLOBAL,/* A Bx Gbl[Kst(Bx)] := R(A) */
+OP_SETUPVAL,/* A B UpValue[B] := R(A) */
+OP_SETTABLE,/* A B C R(A)[RK(B)] := RK(C) */
+
+OP_NEWTABLE,/* A B C R(A) := {} (size = B,C) */
+
+OP_SELF,/* A B C R(A+1) := R(B); R(A) := R(B)[RK(C)] */
+
+OP_ADD,/* A B C R(A) := RK(B) + RK(C) */
+OP_SUB,/* A B C R(A) := RK(B) - RK(C) */
+OP_MUL,/* A B C R(A) := RK(B) * RK(C) */
+OP_DIV,/* A B C R(A) := RK(B) / RK(C) */
+OP_MOD,/* A B C R(A) := RK(B) % RK(C) */
+OP_POW,/* A B C R(A) := RK(B) ^ RK(C) */
+OP_UNM,/* A B R(A) := -R(B) */
+OP_NOT,/* A B R(A) := not R(B) */
+OP_LEN,/* A B R(A) := length of R(B) */
+
+OP_CONCAT,/* A B C R(A) := R(B).. ... ..R(C) */
+
+OP_JMP,/* sBx pc+=sBx */
+
+OP_EQ,/* A B C if ((RK(B) == RK(C)) ~= A) then pc++ */
+OP_LT,/* A B C if ((RK(B) < RK(C)) ~= A) then pc++ */
+OP_LE,/* A B C if ((RK(B) <= RK(C)) ~= A) then pc++ */
+
+OP_TEST,/* A C if not (R(A) <=> C) then pc++ */
+OP_TESTSET,/* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */
+
+OP_CALL,/* A B C R(A), ... ,R(A+C-2) := R(A)(R(A+1), ... ,R(A+B-1)) */
+OP_TAILCALL,/* A B C return R(A)(R(A+1), ... ,R(A+B-1)) */
+OP_RETURN,/* A B return R(A), ... ,R(A+B-2) (see note) */
+
+OP_FORLOOP,/* A sBx R(A)+=R(A+2);
+    if R(A) <?= R(A+1) then { pc+=sBx; R(A+3)=R(A) }*/
+OP_FORPREP,/* A sBx R(A)-=R(A+2); pc+=sBx */
+
+OP_TFORLOOP,/* A C R(A+3), ... ,R(A+3+C) := R(A)(R(A+1), R(A+2));
+ if R(A+3) ~= nil then { pc++; R(A+2)=R(A+3); } */
+OP_SETLIST,/* A B C R(A)[(C-1)*FPF+i] := R(A+i), 1 <= i <= B */
+
+OP_CLOSE,/* A close all variables in the stack up to (>=) R(A)*/
+OP_CLOSURE,/* A Bx R(A) := closure(KPROTO[Bx], R(A), ... ,R(A+n)) */
+
+OP_VARARG/* A B R(A), R(A+1), ..., R(A+B-1) = vararg */
+} OpCode;
+
+
+#define NUM_OPCODES (cast(int, OP_VARARG) + 1)
+
+
+
+/*===========================================================================
+ Notes:
+ (*) In OP_CALL, if (B == 0) then B = top. C is the number of returns - 1,
+ and can be 0: OP_CALL then sets `top' to last_result+1, so
+ next open instruction (OP_CALL, OP_RETURN, OP_SETLIST) may use `top'.
+
+ (*) In OP_VARARG, if (B == 0) then use actual number of varargs and
+ set top (like in OP_CALL with C == 0).
+
+ (*) In OP_RETURN, if (B == 0) then return up to `top'
+
+ (*) In OP_SETLIST, if (B == 0) then B = `top';
+ if (C == 0) then next `instruction' is real C
+
+ (*) For comparisons, A specifies what condition the test should accept
+ (true or false).
+
+ (*) All `skips' (pc++) assume that next instruction is a jump
+===========================================================================*/
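+/*
+** For instance (an illustrative sketch, not an exhaustive list): in a
+** call written as f(g()) the inner call to g is coded with C == 0 (all
+** results, `top' is set) and the outer call to f with B == 0 (arguments
+** up to `top'), following the rules above.
+*/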
+
+
+/*
+** masks for instruction properties. The format is:
+** bits 0-1: op mode
+** bits 2-3: C arg mode
+** bits 4-5: B arg mode
+** bit 6: instruction set register A
+** bit 7: operator is a test
+*/
+
+enum OpArgMask {
+ OpArgN, /* argument is not used */
+ OpArgU, /* argument is used */
+ OpArgR, /* argument is a register or a jump offset */
+ OpArgK /* argument is a constant or register/constant */
+};
+
+LUAI_DATA const lu_byte luaP_opmodes[NUM_OPCODES];
+
+#define getOpMode(m) (cast(enum OpMode, luaP_opmodes[m] & 3))
+#define getBMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 4) & 3))
+#define getCMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 2) & 3))
+#define testAMode(m) (luaP_opmodes[m] & (1 << 6))
+#define testTMode(m) (luaP_opmodes[m] & (1 << 7))
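+/*
+** Worked example (for illustration; derived from the definitions above
+** and the table in lopcodes.c): the entry for OP_LOADK is
+** opmode(0, 1, OpArgK, OpArgN, iABx), which packs to 0x71; getOpMode
+** then yields iABx, getBMode OpArgK, getCMode OpArgN, testAMode is
+** non-zero and testTMode is zero.
+*/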
+
+
+LUAI_DATA const char *const luaP_opnames[NUM_OPCODES+1]; /* opcode names */
+
+
+/* number of list items to accumulate before a SETLIST instruction */
+#define LFIELDS_PER_FLUSH 50
+
+
+#endif
diff --git a/deps/lua/src/loslib.c b/deps/lua/src/loslib.c
new file mode 100644
index 0000000000000000000000000000000000000000..509d7b72e49dffc89dc4f62417d9350d0a7d5c8a
--- /dev/null
+++ b/deps/lua/src/loslib.c
@@ -0,0 +1,238 @@
+/*
+** $Id: loslib.c,v 1.17 2006/01/27 13:54:31 roberto Exp $
+** Standard Operating System library
+** See Copyright Notice in lua.h
+*/
+
+
+#include <errno.h>
+#include <locale.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#define loslib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+static int os_pushresult (lua_State *L, int i, const char *filename) {
+ int en = errno; /* calls to Lua API may change this value */
+ if (i) {
+ lua_pushboolean(L, 1);
+ return 1;
+ }
+ else {
+ lua_pushnil(L);
+ if (filename)
+ lua_pushfstring(L, "%s: %s", filename, strerror(en));
+ else
+ lua_pushfstring(L, "%s", strerror(en));
+ lua_pushinteger(L, en);
+ return 3;
+ }
+}
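+/*
+** Illustrative use from Lua (a sketch; the file name is made up): the
+** functions built on os_pushresult return nil, a message and the C
+** errno on failure, e.g.
+**   local ok, msg, code = os.remove("/no/such/file")
+**   -- ok == nil, msg == "/no/such/file: ...", code == errno value
+*/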
+
+
+static int os_execute (lua_State *L) {
+ lua_pushinteger(L, system(luaL_optstring(L, 1, NULL)));
+ return 1;
+}
+
+
+static int os_remove (lua_State *L) {
+ const char *filename = luaL_checkstring(L, 1);
+ return os_pushresult(L, remove(filename) == 0, filename);
+}
+
+
+static int os_rename (lua_State *L) {
+ const char *fromname = luaL_checkstring(L, 1);
+ const char *toname = luaL_checkstring(L, 2);
+ return os_pushresult(L, rename(fromname, toname) == 0, fromname);
+}
+
+
+static int os_tmpname (lua_State *L) {
+ char buff[LUA_TMPNAMBUFSIZE];
+ int err;
+ lua_tmpnam(buff, err);
+ if (err)
+ return luaL_error(L, "unable to generate a unique filename");
+ lua_pushstring(L, buff);
+ return 1;
+}
+
+
+static int os_getenv (lua_State *L) {
+ lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */
+ return 1;
+}
+
+
+static int os_clock (lua_State *L) {
+ lua_pushnumber(L, ((lua_Number)clock())/(lua_Number)CLOCKS_PER_SEC);
+ return 1;
+}
+
+
+/*
+** {======================================================
+** Time/Date operations
+** { year=%Y, month=%m, day=%d, hour=%H, min=%M, sec=%S,
+** wday=%w+1, yday=%j, isdst=? }
+** =======================================================
+*/
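+/*
+** Illustrative round trip from the Lua side (a sketch using the field
+** names listed above):
+**   local t = os.date("*t")          -- current time as a table
+**   local s = os.time{year=t.year, month=t.month, day=t.day,
+**                     hour=t.hour, min=t.min, sec=t.sec}
+*/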
+
+static void setfield (lua_State *L, const char *key, int value) {
+ lua_pushinteger(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setboolfield (lua_State *L, const char *key, int value) {
+ if (value < 0) /* undefined? */
+ return; /* does not set field */
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int getboolfield (lua_State *L, const char *key) {
+ int res;
+ lua_getfield(L, -1, key);
+ res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+}
+
+
+static int getfield (lua_State *L, const char *key, int d) {
+ int res;
+ lua_getfield(L, -1, key);
+ if (lua_isnumber(L, -1))
+ res = (int)lua_tointeger(L, -1);
+ else {
+ if (d < 0)
+ return luaL_error(L, "field " LUA_QS " missing in date table", key);
+ res = d;
+ }
+ lua_pop(L, 1);
+ return res;
+}
+
+
+static int os_date (lua_State *L) {
+ const char *s = luaL_optstring(L, 1, "%c");
+ time_t t = lua_isnoneornil(L, 2) ? time(NULL) :
+ (time_t)luaL_checknumber(L, 2);
+ struct tm *stm;
+ if (*s == '!') { /* UTC? */
+ stm = gmtime(&t);
+ s++; /* skip `!' */
+ }
+ else
+ stm = localtime(&t);
+ if (stm == NULL) /* invalid date? */
+ lua_pushnil(L);
+ else if (strcmp(s, "*t") == 0) {
+ lua_createtable(L, 0, 9); /* 9 = number of fields */
+ setfield(L, "sec", stm->tm_sec);
+ setfield(L, "min", stm->tm_min);
+ setfield(L, "hour", stm->tm_hour);
+ setfield(L, "day", stm->tm_mday);
+ setfield(L, "month", stm->tm_mon+1);
+ setfield(L, "year", stm->tm_year+1900);
+ setfield(L, "wday", stm->tm_wday+1);
+ setfield(L, "yday", stm->tm_yday+1);
+ setboolfield(L, "isdst", stm->tm_isdst);
+ }
+ else {
+ char b[256];
+ if (strftime(b, sizeof(b), s, stm))
+ lua_pushstring(L, b);
+ else
+ return luaL_error(L, LUA_QL("date") " format too long");
+ }
+ return 1;
+}
+
+
+static int os_time (lua_State *L) {
+ time_t t;
+ if (lua_isnoneornil(L, 1)) /* called without args? */
+ t = time(NULL); /* get current time */
+ else {
+ struct tm ts;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_settop(L, 1); /* make sure table is at the top */
+ ts.tm_sec = getfield(L, "sec", 0);
+ ts.tm_min = getfield(L, "min", 0);
+ ts.tm_hour = getfield(L, "hour", 12);
+ ts.tm_mday = getfield(L, "day", -1);
+ ts.tm_mon = getfield(L, "month", -1) - 1;
+ ts.tm_year = getfield(L, "year", -1) - 1900;
+ ts.tm_isdst = getboolfield(L, "isdst");
+ t = mktime(&ts);
+ }
+ if (t == (time_t)(-1))
+ lua_pushnil(L);
+ else
+ lua_pushnumber(L, (lua_Number)t);
+ return 1;
+}
+
+
+static int os_difftime (lua_State *L) {
+ lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)),
+ (time_t)(luaL_optnumber(L, 2, 0))));
+ return 1;
+}
+
+/* }====================================================== */
+
+
+static int os_setlocale (lua_State *L) {
+ static const int cat[] = {LC_ALL, LC_COLLATE, LC_CTYPE, LC_MONETARY,
+ LC_NUMERIC, LC_TIME};
+ static const char *const catnames[] = {"all", "collate", "ctype", "monetary",
+ "numeric", "time", NULL};
+ const char *l = lua_tostring(L, 1);
+ int op = luaL_checkoption(L, 2, "all", catnames);
+ luaL_argcheck(L, l || lua_isnoneornil(L, 1), 1, "string expected");
+ lua_pushstring(L, setlocale(cat[op], l));
+ return 1;
+}
+
+
+static int os_exit (lua_State *L) {
+ exit(luaL_optint(L, 1, EXIT_SUCCESS));
+ return 0; /* to avoid warnings */
+}
+
+static const luaL_Reg syslib[] = {
+ {"clock", os_clock},
+ {"date", os_date},
+ {"difftime", os_difftime},
+ {"execute", os_execute},
+ {"exit", os_exit},
+ {"getenv", os_getenv},
+ {"remove", os_remove},
+ {"rename", os_rename},
+ {"setlocale", os_setlocale},
+ {"time", os_time},
+ {"tmpname", os_tmpname},
+ {NULL, NULL}
+};
+
+/* }====================================================== */
+
+
+
+LUALIB_API int luaopen_os (lua_State *L) {
+ luaL_register(L, LUA_OSLIBNAME, syslib);
+ return 1;
+}
+
diff --git a/deps/lua/src/lparser.c b/deps/lua/src/lparser.c
new file mode 100644
index 0000000000000000000000000000000000000000..b40ee794fe785d60578b49c8471e23c590703126
--- /dev/null
+++ b/deps/lua/src/lparser.c
@@ -0,0 +1,1336 @@
+/*
+** $Id: lparser.c,v 2.40 2005/12/22 16:19:56 roberto Exp $
+** Lua Parser
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lparser_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lcode.h"
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "llex.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lparser.h"
+#include "lstate.h"
+#include "lstring.h"
+
+
+
+
+#define hasmultret(k) ((k) == VCALL || (k) == VVARARG)
+
+#define getlocvar(fs, i) ((fs)->f->locvars[(fs)->actvar[i]])
+
+#define luaY_checklimit(fs,v,l,m) if ((v)>(l)) errorlimit(fs,l,m)
+
+
+/*
+** nodes for block list (list of active blocks)
+*/
+typedef struct BlockCnt {
+ struct BlockCnt *previous; /* chain */
+ int breaklist; /* list of jumps out of this loop */
+ lu_byte nactvar; /* # active locals outside the breakable structure */
+ lu_byte upval; /* true if some variable in the block is an upvalue */
+ lu_byte isbreakable; /* true if `block' is a loop */
+} BlockCnt;
+
+
+
+/*
+** prototypes for recursive non-terminal functions
+*/
+static void chunk (LexState *ls);
+static void expr (LexState *ls, expdesc *v);
+
+
+static void anchor_token (LexState *ls) {
+ if (ls->t.token == TK_NAME || ls->t.token == TK_STRING) {
+ TString *ts = ls->t.seminfo.ts;
+ luaX_newstring(ls, getstr(ts), ts->tsv.len);
+ }
+}
+
+
+static void error_expected (LexState *ls, int token) {
+ luaX_syntaxerror(ls,
+ luaO_pushfstring(ls->L, LUA_QS " expected", luaX_token2str(ls, token)));
+}
+
+
+static void errorlimit (FuncState *fs, int limit, const char *what) {
+ const char *msg = (fs->f->linedefined == 0) ?
+ luaO_pushfstring(fs->L, "main function has more than %d %s", limit, what) :
+ luaO_pushfstring(fs->L, "function at line %d has more than %d %s",
+ fs->f->linedefined, limit, what);
+ luaX_lexerror(fs->ls, msg, 0);
+}
+
+
+static int testnext (LexState *ls, int c) {
+ if (ls->t.token == c) {
+ luaX_next(ls);
+ return 1;
+ }
+ else return 0;
+}
+
+
+static void check (LexState *ls, int c) {
+ if (ls->t.token != c)
+ error_expected(ls, c);
+}
+
+static void checknext (LexState *ls, int c) {
+ check(ls, c);
+ luaX_next(ls);
+}
+
+
+#define check_condition(ls,c,msg) { if (!(c)) luaX_syntaxerror(ls, msg); }
+
+
+
+static void check_match (LexState *ls, int what, int who, int where) {
+ if (!testnext(ls, what)) {
+ if (where == ls->linenumber)
+ error_expected(ls, what);
+ else {
+ luaX_syntaxerror(ls, luaO_pushfstring(ls->L,
+ LUA_QS " expected (to close " LUA_QS " at line %d)",
+ luaX_token2str(ls, what), luaX_token2str(ls, who), where));
+ }
+ }
+}
+
+
+static TString *str_checkname (LexState *ls) {
+ TString *ts;
+ check(ls, TK_NAME);
+ ts = ls->t.seminfo.ts;
+ luaX_next(ls);
+ return ts;
+}
+
+
+static void init_exp (expdesc *e, expkind k, int i) {
+ e->f = e->t = NO_JUMP;
+ e->k = k;
+ e->u.s.info = i;
+}
+
+
+static void codestring (LexState *ls, expdesc *e, TString *s) {
+ init_exp(e, VK, luaK_stringK(ls->fs, s));
+}
+
+
+static void checkname(LexState *ls, expdesc *e) {
+ codestring(ls, e, str_checkname(ls));
+}
+
+
+static int registerlocalvar (LexState *ls, TString *varname) {
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int oldsize = f->sizelocvars;
+ luaM_growvector(ls->L, f->locvars, fs->nlocvars, f->sizelocvars,
+ LocVar, SHRT_MAX, "too many local variables");
+ while (oldsize < f->sizelocvars) f->locvars[oldsize++].varname = NULL;
+ f->locvars[fs->nlocvars].varname = varname;
+ luaC_objbarrier(ls->L, f, varname);
+ return fs->nlocvars++;
+}
+
+
+#define new_localvarliteral(ls,v,n) \
+ new_localvar(ls, luaX_newstring(ls, "" v, (sizeof(v)/sizeof(char))-1), n)
+
+
+static void new_localvar (LexState *ls, TString *name, int n) {
+ FuncState *fs = ls->fs;
+ luaY_checklimit(fs, fs->nactvar+n+1, LUAI_MAXVARS, "local variables");
+ fs->actvar[fs->nactvar+n] = cast(unsigned short, registerlocalvar(ls, name));
+}
+
+
+static void adjustlocalvars (LexState *ls, int nvars) {
+ FuncState *fs = ls->fs;
+ fs->nactvar = cast_byte(fs->nactvar + nvars);
+ for (; nvars; nvars--) {
+ getlocvar(fs, fs->nactvar - nvars).startpc = fs->pc;
+ }
+}
+
+
+static void removevars (LexState *ls, int tolevel) {
+ FuncState *fs = ls->fs;
+ while (fs->nactvar > tolevel)
+ getlocvar(fs, --fs->nactvar).endpc = fs->pc;
+}
+
+
+static int indexupvalue (FuncState *fs, TString *name, expdesc *v) {
+ int i;
+ Proto *f = fs->f;
+ int oldsize = f->sizeupvalues;
+  for (i=0; i<f->nups; i++) {
+ if (fs->upvalues[i].k == v->k && fs->upvalues[i].info == v->u.s.info) {
+ lua_assert(f->upvalues[i] == name);
+ return i;
+ }
+ }
+ /* new one */
+ luaY_checklimit(fs, f->nups + 1, LUAI_MAXUPVALUES, "upvalues");
+ luaM_growvector(fs->L, f->upvalues, f->nups, f->sizeupvalues,
+ TString *, MAX_INT, "");
+ while (oldsize < f->sizeupvalues) f->upvalues[oldsize++] = NULL;
+ f->upvalues[f->nups] = name;
+ luaC_objbarrier(fs->L, f, name);
+ lua_assert(v->k == VLOCAL || v->k == VUPVAL);
+ fs->upvalues[f->nups].k = cast_byte(v->k);
+ fs->upvalues[f->nups].info = cast_byte(v->u.s.info);
+ return f->nups++;
+}
+
+
+static int searchvar (FuncState *fs, TString *n) {
+ int i;
+ for (i=fs->nactvar-1; i >= 0; i--) {
+ if (n == getlocvar(fs, i).varname)
+ return i;
+ }
+ return -1; /* not found */
+}
+
+
+static void markupval (FuncState *fs, int level) {
+ BlockCnt *bl = fs->bl;
+ while (bl && bl->nactvar > level) bl = bl->previous;
+ if (bl) bl->upval = 1;
+}
+
+
+static int singlevaraux (FuncState *fs, TString *n, expdesc *var, int base) {
+ if (fs == NULL) { /* no more levels? */
+ init_exp(var, VGLOBAL, NO_REG); /* default is global variable */
+ return VGLOBAL;
+ }
+ else {
+ int v = searchvar(fs, n); /* look up at current level */
+ if (v >= 0) {
+ init_exp(var, VLOCAL, v);
+ if (!base)
+ markupval(fs, v); /* local will be used as an upval */
+ return VLOCAL;
+ }
+ else { /* not found at current level; try upper one */
+ if (singlevaraux(fs->prev, n, var, 0) == VGLOBAL)
+ return VGLOBAL;
+ var->u.s.info = indexupvalue(fs, n, var); /* else was LOCAL or UPVAL */
+ var->k = VUPVAL; /* upvalue in this level */
+ return VUPVAL;
+ }
+ }
+}
+
+
+static void singlevar (LexState *ls, expdesc *var) {
+ TString *varname = str_checkname(ls);
+ FuncState *fs = ls->fs;
+ if (singlevaraux(fs, varname, var, 1) == VGLOBAL)
+ var->u.s.info = luaK_stringK(fs, varname); /* info points to global name */
+}
+
+
+static void adjust_assign (LexState *ls, int nvars, int nexps, expdesc *e) {
+ FuncState *fs = ls->fs;
+ int extra = nvars - nexps;
+ if (hasmultret(e->k)) {
+ extra++; /* includes call itself */
+ if (extra < 0) extra = 0;
+ luaK_setreturns(fs, e, extra); /* last exp. provides the difference */
+ if (extra > 1) luaK_reserveregs(fs, extra-1);
+ }
+ else {
+ if (e->k != VVOID) luaK_exp2nextreg(fs, e); /* close last expression */
+ if (extra > 0) {
+ int reg = fs->freereg;
+ luaK_reserveregs(fs, extra);
+ luaK_nil(fs, reg, extra);
+ }
+ }
+}
+
+
+static void enterlevel (LexState *ls) {
+ if (++ls->L->nCcalls > LUAI_MAXCCALLS)
+ luaX_lexerror(ls, "chunk has too many syntax levels", 0);
+}
+
+
+#define leavelevel(ls) ((ls)->L->nCcalls--)
+
+
+static void enterblock (FuncState *fs, BlockCnt *bl, lu_byte isbreakable) {
+ bl->breaklist = NO_JUMP;
+ bl->isbreakable = isbreakable;
+ bl->nactvar = fs->nactvar;
+ bl->upval = 0;
+ bl->previous = fs->bl;
+ fs->bl = bl;
+ lua_assert(fs->freereg == fs->nactvar);
+}
+
+
+static void leaveblock (FuncState *fs) {
+ BlockCnt *bl = fs->bl;
+ fs->bl = bl->previous;
+ removevars(fs->ls, bl->nactvar);
+ if (bl->upval)
+ luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0);
+ lua_assert(!bl->isbreakable || !bl->upval); /* loops have no body */
+ lua_assert(bl->nactvar == fs->nactvar);
+ fs->freereg = fs->nactvar; /* free registers */
+ luaK_patchtohere(fs, bl->breaklist);
+}
+
+
+static void pushclosure (LexState *ls, FuncState *func, expdesc *v) {
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int oldsize = f->sizep;
+ int i;
+ luaM_growvector(ls->L, f->p, fs->np, f->sizep, Proto *,
+ MAXARG_Bx, "constant table overflow");
+ while (oldsize < f->sizep) f->p[oldsize++] = NULL;
+ f->p[fs->np++] = func->f;
+ luaC_objbarrier(ls->L, f, func->f);
+ init_exp(v, VRELOCABLE, luaK_codeABx(fs, OP_CLOSURE, 0, fs->np-1));
+  for (i=0; i<func->f->nups; i++) {
+ OpCode o = (func->upvalues[i].k == VLOCAL) ? OP_MOVE : OP_GETUPVAL;
+ luaK_codeABC(fs, o, 0, func->upvalues[i].info, 0);
+ }
+}
+
+
+static void open_func (LexState *ls, FuncState *fs) {
+ lua_State *L = ls->L;
+ Proto *f = luaF_newproto(L);
+ fs->f = f;
+ fs->prev = ls->fs; /* linked list of funcstates */
+ fs->ls = ls;
+ fs->L = L;
+ ls->fs = fs;
+ fs->pc = 0;
+ fs->lasttarget = -1;
+ fs->jpc = NO_JUMP;
+ fs->freereg = 0;
+ fs->nk = 0;
+ fs->np = 0;
+ fs->nlocvars = 0;
+ fs->nactvar = 0;
+ fs->bl = NULL;
+ f->source = ls->source;
+ f->maxstacksize = 2; /* registers 0/1 are always valid */
+ fs->h = luaH_new(L, 0, 0);
+ /* anchor table of constants and prototype (to avoid being collected) */
+ sethvalue2s(L, L->top, fs->h);
+ incr_top(L);
+ setptvalue2s(L, L->top, f);
+ incr_top(L);
+}
+
+
+static void close_func (LexState *ls) {
+ lua_State *L = ls->L;
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ removevars(ls, 0);
+ luaK_ret(fs, 0, 0); /* final return */
+ luaM_reallocvector(L, f->code, f->sizecode, fs->pc, Instruction);
+ f->sizecode = fs->pc;
+ luaM_reallocvector(L, f->lineinfo, f->sizelineinfo, fs->pc, int);
+ f->sizelineinfo = fs->pc;
+ luaM_reallocvector(L, f->k, f->sizek, fs->nk, TValue);
+ f->sizek = fs->nk;
+ luaM_reallocvector(L, f->p, f->sizep, fs->np, Proto *);
+ f->sizep = fs->np;
+ luaM_reallocvector(L, f->locvars, f->sizelocvars, fs->nlocvars, LocVar);
+ f->sizelocvars = fs->nlocvars;
+ luaM_reallocvector(L, f->upvalues, f->sizeupvalues, f->nups, TString *);
+ f->sizeupvalues = f->nups;
+ lua_assert(luaG_checkcode(f));
+ lua_assert(fs->bl == NULL);
+ ls->fs = fs->prev;
+ L->top -= 2; /* remove table and prototype from the stack */
+ /* last token read was anchored in defunct function; must reanchor it */
+ if (fs) anchor_token(ls);
+}
+
+
+Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) {
+ struct LexState lexstate;
+ struct FuncState funcstate;
+ lexstate.buff = buff;
+ luaX_setinput(L, &lexstate, z, luaS_new(L, name));
+ open_func(&lexstate, &funcstate);
+ funcstate.f->is_vararg = VARARG_ISVARARG; /* main func. is always vararg */
+ luaX_next(&lexstate); /* read first token */
+ chunk(&lexstate);
+ check(&lexstate, TK_EOS);
+ close_func(&lexstate);
+ lua_assert(funcstate.prev == NULL);
+ lua_assert(funcstate.f->nups == 0);
+ lua_assert(lexstate.fs == NULL);
+ return funcstate.f;
+}
+
+
+
+/*============================================================*/
+/* GRAMMAR RULES */
+/*============================================================*/
+
+
+static void field (LexState *ls, expdesc *v) {
+ /* field -> ['.' | ':'] NAME */
+ FuncState *fs = ls->fs;
+ expdesc key;
+ luaK_exp2anyreg(fs, v);
+ luaX_next(ls); /* skip the dot or colon */
+ checkname(ls, &key);
+ luaK_indexed(fs, v, &key);
+}
+
+
+static void yindex (LexState *ls, expdesc *v) {
+ /* index -> '[' expr ']' */
+ luaX_next(ls); /* skip the '[' */
+ expr(ls, v);
+ luaK_exp2val(ls->fs, v);
+ checknext(ls, ']');
+}
+
+
+/*
+** {======================================================================
+** Rules for Constructors
+** =======================================================================
+*/
+
+
+struct ConsControl {
+ expdesc v; /* last list item read */
+ expdesc *t; /* table descriptor */
+ int nh; /* total number of `record' elements */
+ int na; /* total number of array elements */
+ int tostore; /* number of array elements pending to be stored */
+};
+
+
+static void recfield (LexState *ls, struct ConsControl *cc) {
+ /* recfield -> (NAME | `['exp1`]') = exp1 */
+ FuncState *fs = ls->fs;
+ int reg = ls->fs->freereg;
+ expdesc key, val;
+ if (ls->t.token == TK_NAME) {
+ luaY_checklimit(fs, cc->nh, MAX_INT, "items in a constructor");
+ checkname(ls, &key);
+ }
+ else /* ls->t.token == '[' */
+ yindex(ls, &key);
+ cc->nh++;
+ checknext(ls, '=');
+ luaK_exp2RK(fs, &key);
+ expr(ls, &val);
+ luaK_codeABC(fs, OP_SETTABLE, cc->t->u.s.info, luaK_exp2RK(fs, &key),
+ luaK_exp2RK(fs, &val));
+ fs->freereg = reg; /* free registers */
+}
+
+
+static void closelistfield (FuncState *fs, struct ConsControl *cc) {
+ if (cc->v.k == VVOID) return; /* there is no list item */
+ luaK_exp2nextreg(fs, &cc->v);
+ cc->v.k = VVOID;
+ if (cc->tostore == LFIELDS_PER_FLUSH) {
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); /* flush */
+ cc->tostore = 0; /* no more items pending */
+ }
+}
+
+
+static void lastlistfield (FuncState *fs, struct ConsControl *cc) {
+ if (cc->tostore == 0) return;
+ if (hasmultret(cc->v.k)) {
+ luaK_setmultret(fs, &cc->v);
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, LUA_MULTRET);
+ cc->na--; /* do not count last expression (unknown number of elements) */
+ }
+ else {
+ if (cc->v.k != VVOID)
+ luaK_exp2nextreg(fs, &cc->v);
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore);
+ }
+}
+
+
+static void listfield (LexState *ls, struct ConsControl *cc) {
+ expr(ls, &cc->v);
+ luaY_checklimit(ls->fs, cc->na, MAXARG_Bx, "items in a constructor");
+ cc->na++;
+ cc->tostore++;
+}
+
+
+static void constructor (LexState *ls, expdesc *t) {
+ /* constructor -> ?? */
+ FuncState *fs = ls->fs;
+ int line = ls->linenumber;
+ int pc = luaK_codeABC(fs, OP_NEWTABLE, 0, 0, 0);
+ struct ConsControl cc;
+ cc.na = cc.nh = cc.tostore = 0;
+ cc.t = t;
+ init_exp(t, VRELOCABLE, pc);
+ init_exp(&cc.v, VVOID, 0); /* no value (yet) */
+ luaK_exp2nextreg(ls->fs, t); /* fix it at stack top (for gc) */
+ checknext(ls, '{');
+ do {
+ lua_assert(cc.v.k == VVOID || cc.tostore > 0);
+ if (ls->t.token == '}') break;
+ closelistfield(fs, &cc);
+ switch(ls->t.token) {
+ case TK_NAME: { /* may be listfields or recfields */
+ luaX_lookahead(ls);
+ if (ls->lookahead.token != '=') /* expression? */
+ listfield(ls, &cc);
+ else
+ recfield(ls, &cc);
+ break;
+ }
+ case '[': { /* constructor_item -> recfield */
+ recfield(ls, &cc);
+ break;
+ }
+ default: { /* constructor_part -> listfield */
+ listfield(ls, &cc);
+ break;
+ }
+ }
+ } while (testnext(ls, ',') || testnext(ls, ';'));
+ check_match(ls, '}', '{', line);
+ lastlistfield(fs, &cc);
+ SETARG_B(fs->f->code[pc], luaO_int2fb(cc.na)); /* set initial array size */
+ SETARG_C(fs->f->code[pc], luaO_int2fb(cc.nh)); /* set initial table size */
+}
+
+/* }====================================================================== */
+
+
+
+static void parlist (LexState *ls) {
+ /* parlist -> [ param { `,' param } ] */
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int nparams = 0;
+ f->is_vararg = 0;
+ if (ls->t.token != ')') { /* is `parlist' not empty? */
+ do {
+ switch (ls->t.token) {
+ case TK_NAME: { /* param -> NAME */
+ new_localvar(ls, str_checkname(ls), nparams++);
+ break;
+ }
+ case TK_DOTS: { /* param -> `...' */
+ luaX_next(ls);
+#if defined(LUA_COMPAT_VARARG)
+ /* use `arg' as default name */
+ new_localvarliteral(ls, "arg", nparams++);
+ f->is_vararg = VARARG_HASARG | VARARG_NEEDSARG;
+#endif
+ f->is_vararg |= VARARG_ISVARARG;
+ break;
+ }
+      default: luaX_syntaxerror(ls, "<name> or " LUA_QL("...") " expected");
+ }
+ } while (!f->is_vararg && testnext(ls, ','));
+ }
+ adjustlocalvars(ls, nparams);
+ f->numparams = cast_byte(fs->nactvar - (f->is_vararg & VARARG_HASARG));
+ luaK_reserveregs(fs, fs->nactvar); /* reserve register for parameters */
+}
+
+
+static void body (LexState *ls, expdesc *e, int needself, int line) {
+ /* body -> `(' parlist `)' chunk END */
+ FuncState new_fs;
+ open_func(ls, &new_fs);
+ new_fs.f->linedefined = line;
+ checknext(ls, '(');
+ if (needself) {
+ new_localvarliteral(ls, "self", 0);
+ adjustlocalvars(ls, 1);
+ }
+ parlist(ls);
+ checknext(ls, ')');
+ chunk(ls);
+ new_fs.f->lastlinedefined = ls->linenumber;
+ check_match(ls, TK_END, TK_FUNCTION, line);
+ close_func(ls);
+ pushclosure(ls, &new_fs, e);
+}
+
+
+static int explist1 (LexState *ls, expdesc *v) {
+ /* explist1 -> expr { `,' expr } */
+ int n = 1; /* at least one expression */
+ expr(ls, v);
+ while (testnext(ls, ',')) {
+ luaK_exp2nextreg(ls->fs, v);
+ expr(ls, v);
+ n++;
+ }
+ return n;
+}
+
+
+static void funcargs (LexState *ls, expdesc *f) {
+ FuncState *fs = ls->fs;
+ expdesc args;
+ int base, nparams;
+ int line = ls->linenumber;
+ switch (ls->t.token) {
+ case '(': { /* funcargs -> `(' [ explist1 ] `)' */
+ if (line != ls->lastline)
+ luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
+ luaX_next(ls);
+ if (ls->t.token == ')') /* arg list is empty? */
+ args.k = VVOID;
+ else {
+ explist1(ls, &args);
+ luaK_setmultret(fs, &args);
+ }
+ check_match(ls, ')', '(', line);
+ break;
+ }
+ case '{': { /* funcargs -> constructor */
+ constructor(ls, &args);
+ break;
+ }
+ case TK_STRING: { /* funcargs -> STRING */
+ codestring(ls, &args, ls->t.seminfo.ts);
+ luaX_next(ls); /* must use `seminfo' before `next' */
+ break;
+ }
+ default: {
+ luaX_syntaxerror(ls, "function arguments expected");
+ return;
+ }
+ }
+ lua_assert(f->k == VNONRELOC);
+ base = f->u.s.info; /* base register for call */
+ if (hasmultret(args.k))
+ nparams = LUA_MULTRET; /* open call */
+ else {
+ if (args.k != VVOID)
+ luaK_exp2nextreg(fs, &args); /* close last argument */
+ nparams = fs->freereg - (base+1);
+ }
+ init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2));
+ luaK_fixline(fs, line);
+ fs->freereg = base+1; /* call remove function and arguments and leaves
+ (unless changed) one result */
+}
+
+
+
+
+/*
+** {======================================================================
+** Expression parsing
+** =======================================================================
+*/
+
+
+static void prefixexp (LexState *ls, expdesc *v) {
+ /* prefixexp -> NAME | '(' expr ')' */
+ switch (ls->t.token) {
+ case '(': {
+ int line = ls->linenumber;
+ luaX_next(ls);
+ expr(ls, v);
+ check_match(ls, ')', '(', line);
+ luaK_dischargevars(ls->fs, v);
+ return;
+ }
+ case TK_NAME: {
+ singlevar(ls, v);
+ return;
+ }
+ default: {
+ luaX_syntaxerror(ls, "unexpected symbol");
+ return;
+ }
+ }
+}
+
+
+static void primaryexp (LexState *ls, expdesc *v) {
+ /* primaryexp ->
+ prefixexp { `.' NAME | `[' exp `]' | `:' NAME funcargs | funcargs } */
+ FuncState *fs = ls->fs;
+ prefixexp(ls, v);
+ for (;;) {
+ switch (ls->t.token) {
+ case '.': { /* field */
+ field(ls, v);
+ break;
+ }
+ case '[': { /* `[' exp1 `]' */
+ expdesc key;
+ luaK_exp2anyreg(fs, v);
+ yindex(ls, &key);
+ luaK_indexed(fs, v, &key);
+ break;
+ }
+ case ':': { /* `:' NAME funcargs */
+ expdesc key;
+ luaX_next(ls);
+ checkname(ls, &key);
+ luaK_self(fs, v, &key);
+ funcargs(ls, v);
+ break;
+ }
+ case '(': case TK_STRING: case '{': { /* funcargs */
+ luaK_exp2nextreg(fs, v);
+ funcargs(ls, v);
+ break;
+ }
+ default: return;
+ }
+ }
+}
+
+
+static void simpleexp (LexState *ls, expdesc *v) {
+ /* simpleexp -> NUMBER | STRING | NIL | true | false | ... |
+ constructor | FUNCTION body | primaryexp */
+ switch (ls->t.token) {
+ case TK_NUMBER: {
+ init_exp(v, VKNUM, 0);
+ v->u.nval = ls->t.seminfo.r;
+ break;
+ }
+ case TK_STRING: {
+ codestring(ls, v, ls->t.seminfo.ts);
+ break;
+ }
+ case TK_NIL: {
+ init_exp(v, VNIL, 0);
+ break;
+ }
+ case TK_TRUE: {
+ init_exp(v, VTRUE, 0);
+ break;
+ }
+ case TK_FALSE: {
+ init_exp(v, VFALSE, 0);
+ break;
+ }
+ case TK_DOTS: { /* vararg */
+ FuncState *fs = ls->fs;
+ check_condition(ls, fs->f->is_vararg,
+ "cannot use " LUA_QL("...") " outside a vararg function");
+ fs->f->is_vararg &= ~VARARG_NEEDSARG; /* don't need 'arg' */
+ init_exp(v, VVARARG, luaK_codeABC(fs, OP_VARARG, 0, 1, 0));
+ break;
+ }
+ case '{': { /* constructor */
+ constructor(ls, v);
+ return;
+ }
+ case TK_FUNCTION: {
+ luaX_next(ls);
+ body(ls, v, 0, ls->linenumber);
+ return;
+ }
+ default: {
+ primaryexp(ls, v);
+ return;
+ }
+ }
+ luaX_next(ls);
+}
+
+
+static UnOpr getunopr (int op) {
+ switch (op) {
+ case TK_NOT: return OPR_NOT;
+ case '-': return OPR_MINUS;
+ case '#': return OPR_LEN;
+ default: return OPR_NOUNOPR;
+ }
+}
+
+
+static BinOpr getbinopr (int op) {
+ switch (op) {
+ case '+': return OPR_ADD;
+ case '-': return OPR_SUB;
+ case '*': return OPR_MUL;
+ case '/': return OPR_DIV;
+ case '%': return OPR_MOD;
+ case '^': return OPR_POW;
+ case TK_CONCAT: return OPR_CONCAT;
+ case TK_NE: return OPR_NE;
+ case TK_EQ: return OPR_EQ;
+ case '<': return OPR_LT;
+ case TK_LE: return OPR_LE;
+ case '>': return OPR_GT;
+ case TK_GE: return OPR_GE;
+ case TK_AND: return OPR_AND;
+ case TK_OR: return OPR_OR;
+ default: return OPR_NOBINOPR;
+ }
+}
+
+
+static const struct {
+ lu_byte left; /* left priority for each binary operator */
+ lu_byte right; /* right priority */
+} priority[] = { /* ORDER OPR */
+ {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, /* `+' `-' `/' `%' */
+ {10, 9}, {5, 4}, /* power and concat (right associative) */
+ {3, 3}, {3, 3}, /* equality and inequality */
+ {3, 3}, {3, 3}, {3, 3}, {3, 3}, /* order */
+ {2, 2}, {1, 1} /* logical (and/or) */
+};
+
+#define UNARY_PRIORITY 8 /* priority for unary operators */
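+/*
+** Illustration (derived from the table above): `*' (7) binds tighter
+** than `+' (6), so a + b * c parses as a + (b * c); `^' has left
+** priority 10 but right priority 9, which makes it right associative
+** and stronger than unary operators, so -a ^ b parses as -(a ^ b).
+*/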
+
+
+/*
+** subexpr -> (simpleexp | unop subexpr) { binop subexpr }
+** where `binop' is any binary operator with a priority higher than `limit'
+*/
+static BinOpr subexpr (LexState *ls, expdesc *v, unsigned int limit) {
+ BinOpr op;
+ UnOpr uop;
+ enterlevel(ls);
+ uop = getunopr(ls->t.token);
+ if (uop != OPR_NOUNOPR) {
+ luaX_next(ls);
+ subexpr(ls, v, UNARY_PRIORITY);
+ luaK_prefix(ls->fs, uop, v);
+ }
+ else simpleexp(ls, v);
+ /* expand while operators have priorities higher than `limit' */
+ op = getbinopr(ls->t.token);
+ while (op != OPR_NOBINOPR && priority[op].left > limit) {
+ expdesc v2;
+ BinOpr nextop;
+ luaX_next(ls);
+ luaK_infix(ls->fs, op, v);
+ /* read sub-expression with higher priority */
+ nextop = subexpr(ls, &v2, priority[op].right);
+ luaK_posfix(ls->fs, op, v, &v2);
+ op = nextop;
+ }
+ leavelevel(ls);
+ return op; /* return first untreated operator */
+}
+
+
+static void expr (LexState *ls, expdesc *v) {
+ subexpr(ls, v, 0);
+}
+
+/* }==================================================================== */
+
+
+
+/*
+** {======================================================================
+** Rules for Statements
+** =======================================================================
+*/
+
+
+static int block_follow (int token) {
+ switch (token) {
+ case TK_ELSE: case TK_ELSEIF: case TK_END:
+ case TK_UNTIL: case TK_EOS:
+ return 1;
+ default: return 0;
+ }
+}
+
+
+static void block (LexState *ls) {
+ /* block -> chunk */
+ FuncState *fs = ls->fs;
+ BlockCnt bl;
+ enterblock(fs, &bl, 0);
+ chunk(ls);
+ lua_assert(bl.breaklist == NO_JUMP);
+ leaveblock(fs);
+}
+
+
+/*
+** structure to chain all variables in the left-hand side of an
+** assignment
+*/
+struct LHS_assign {
+ struct LHS_assign *prev;
+ expdesc v; /* variable (global, local, upvalue, or indexed) */
+};
+
+
+/*
+** check whether, in an assignment to a local variable, the local variable
+** is needed in a previous assignment (to a table). If so, save original
+** local value in a safe place and use this safe copy in the previous
+** assignment.
+*/
+static void check_conflict (LexState *ls, struct LHS_assign *lh, expdesc *v) {
+ FuncState *fs = ls->fs;
+ int extra = fs->freereg; /* eventual position to save local variable */
+ int conflict = 0;
+ for (; lh; lh = lh->prev) {
+ if (lh->v.k == VINDEXED) {
+ if (lh->v.u.s.info == v->u.s.info) { /* conflict? */
+ conflict = 1;
+ lh->v.u.s.info = extra; /* previous assignment will use safe copy */
+ }
+ if (lh->v.u.s.aux == v->u.s.info) { /* conflict? */
+ conflict = 1;
+ lh->v.u.s.aux = extra; /* previous assignment will use safe copy */
+ }
+ }
+ }
+ if (conflict) {
+ luaK_codeABC(fs, OP_MOVE, fs->freereg, v->u.s.info, 0); /* make copy */
+ luaK_reserveregs(fs, 1);
+ }
+}
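+/*
+** Example of the conflict handled above (an illustrative sketch): in
+**   local i, a = 1, {}
+**   i, a[i] = i + 1, 20
+** the index in a[i] must use the value of i from before the assignment,
+** so i's register is copied to `extra' and the pending table assignment
+** is redirected to that safe copy.
+*/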
+
+
+static void assignment (LexState *ls, struct LHS_assign *lh, int nvars) {
+ expdesc e;
+ check_condition(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED,
+ "syntax error");
+ if (testnext(ls, ',')) { /* assignment -> `,' primaryexp assignment */
+ struct LHS_assign nv;
+ nv.prev = lh;
+ primaryexp(ls, &nv.v);
+ if (nv.v.k == VLOCAL)
+ check_conflict(ls, lh, &nv.v);
+ assignment(ls, &nv, nvars+1);
+ }
+ else { /* assignment -> `=' explist1 */
+ int nexps;
+ checknext(ls, '=');
+ nexps = explist1(ls, &e);
+ if (nexps != nvars) {
+ adjust_assign(ls, nvars, nexps, &e);
+ if (nexps > nvars)
+ ls->fs->freereg -= nexps - nvars; /* remove extra values */
+ }
+ else {
+ luaK_setoneret(ls->fs, &e); /* close last expression */
+ luaK_storevar(ls->fs, &lh->v, &e);
+ return; /* avoid default */
+ }
+ }
+ init_exp(&e, VNONRELOC, ls->fs->freereg-1); /* default assignment */
+ luaK_storevar(ls->fs, &lh->v, &e);
+}
+
+
+static int cond (LexState *ls) {
+ /* cond -> exp */
+ expdesc v;
+ expr(ls, &v); /* read condition */
+ if (v.k == VNIL) v.k = VFALSE; /* `falses' are all equal here */
+ luaK_goiftrue(ls->fs, &v);
+ return v.f;
+}
+
+
+static void breakstat (LexState *ls) {
+ FuncState *fs = ls->fs;
+ BlockCnt *bl = fs->bl;
+ int upval = 0;
+ while (bl && !bl->isbreakable) {
+ upval |= bl->upval;
+ bl = bl->previous;
+ }
+ if (!bl)
+ luaX_syntaxerror(ls, "no loop to break");
+ if (upval)
+ luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0);
+ luaK_concat(fs, &bl->breaklist, luaK_jump(fs));
+}
+
+
+static void whilestat (LexState *ls, int line) {
+ /* whilestat -> WHILE cond DO block END */
+ FuncState *fs = ls->fs;
+ int whileinit;
+ int condexit;
+ BlockCnt bl;
+ luaX_next(ls); /* skip WHILE */
+ whileinit = luaK_getlabel(fs);
+ condexit = cond(ls);
+ enterblock(fs, &bl, 1);
+ checknext(ls, TK_DO);
+ block(ls);
+ luaK_patchlist(fs, luaK_jump(fs), whileinit);
+ check_match(ls, TK_END, TK_WHILE, line);
+ leaveblock(fs);
+ luaK_patchtohere(fs, condexit); /* false conditions finish the loop */
+}
+
+
+static void repeatstat (LexState *ls, int line) {
+ /* repeatstat -> REPEAT block UNTIL cond */
+ int condexit;
+ FuncState *fs = ls->fs;
+ int repeat_init = luaK_getlabel(fs);
+ BlockCnt bl1, bl2;
+ enterblock(fs, &bl1, 1); /* loop block */
+ enterblock(fs, &bl2, 0); /* scope block */
+ luaX_next(ls); /* skip REPEAT */
+ chunk(ls);
+ check_match(ls, TK_UNTIL, TK_REPEAT, line);
+ condexit = cond(ls); /* read condition (inside scope block) */
+ if (!bl2.upval) { /* no upvalues? */
+ leaveblock(fs); /* finish scope */
+ luaK_patchlist(ls->fs, condexit, repeat_init); /* close the loop */
+ }
+ else { /* complete semantics when there are upvalues */
+ breakstat(ls); /* if condition then break */
+ luaK_patchtohere(ls->fs, condexit); /* else... */
+ leaveblock(fs); /* finish scope... */
+ luaK_patchlist(ls->fs, luaK_jump(fs), repeat_init); /* and repeat */
+ }
+ leaveblock(fs); /* finish loop */
+}
+
+
+static int exp1 (LexState *ls) {
+ expdesc e;
+ int k;
+ expr(ls, &e);
+ k = e.k;
+ luaK_exp2nextreg(ls->fs, &e);
+ return k;
+}
+
+
+static void forbody (LexState *ls, int base, int line, int nvars, int isnum) {
+ /* forbody -> DO block */
+ BlockCnt bl;
+ FuncState *fs = ls->fs;
+ int prep, endfor;
+ adjustlocalvars(ls, 3); /* control variables */
+ checknext(ls, TK_DO);
+ prep = isnum ? luaK_codeAsBx(fs, OP_FORPREP, base, NO_JUMP) : luaK_jump(fs);
+ enterblock(fs, &bl, 0); /* scope for declared variables */
+ adjustlocalvars(ls, nvars);
+ luaK_reserveregs(fs, nvars);
+ block(ls);
+ leaveblock(fs); /* end of scope for declared variables */
+ luaK_patchtohere(fs, prep);
+ endfor = (isnum) ? luaK_codeAsBx(fs, OP_FORLOOP, base, NO_JUMP) :
+ luaK_codeABC(fs, OP_TFORLOOP, base, 0, nvars);
+ luaK_fixline(fs, line); /* pretend that `OP_FOR' starts the loop */
+ luaK_patchlist(fs, (isnum ? endfor : luaK_jump(fs)), prep + 1);
+}
+
+
+static void fornum (LexState *ls, TString *varname, int line) {
+ /* fornum -> NAME = exp1,exp1[,exp1] forbody */
+ FuncState *fs = ls->fs;
+ int base = fs->freereg;
+ new_localvarliteral(ls, "(for index)", 0);
+ new_localvarliteral(ls, "(for limit)", 1);
+ new_localvarliteral(ls, "(for step)", 2);
+ new_localvar(ls, varname, 3);
+ checknext(ls, '=');
+ exp1(ls); /* initial value */
+ checknext(ls, ',');
+ exp1(ls); /* limit */
+ if (testnext(ls, ','))
+ exp1(ls); /* optional step */
+ else { /* default step = 1 */
+ luaK_codeABx(fs, OP_LOADK, fs->freereg, luaK_numberK(fs, 1));
+ luaK_reserveregs(fs, 1);
+ }
+ forbody(ls, base, line, 1, 1);
+}
+
+
+static void forlist (LexState *ls, TString *indexname) {
+ /* forlist -> NAME {,NAME} IN explist1 forbody */
+ FuncState *fs = ls->fs;
+ expdesc e;
+ int nvars = 0;
+ int line;
+ int base = fs->freereg;
+ /* create control variables */
+ new_localvarliteral(ls, "(for generator)", nvars++);
+ new_localvarliteral(ls, "(for state)", nvars++);
+ new_localvarliteral(ls, "(for control)", nvars++);
+ /* create declared variables */
+ new_localvar(ls, indexname, nvars++);
+ while (testnext(ls, ','))
+ new_localvar(ls, str_checkname(ls), nvars++);
+ checknext(ls, TK_IN);
+ line = ls->linenumber;
+ adjust_assign(ls, 3, explist1(ls, &e), &e);
+ luaK_checkstack(fs, 3); /* extra space to call generator */
+ forbody(ls, base, line, nvars - 3, 0);
+}
+
+
+static void forstat (LexState *ls, int line) {
+ /* forstat -> FOR (fornum | forlist) END */
+ FuncState *fs = ls->fs;
+ TString *varname;
+ BlockCnt bl;
+ enterblock(fs, &bl, 1); /* scope for loop and control variables */
+ luaX_next(ls); /* skip `for' */
+ varname = str_checkname(ls); /* first variable name */
+ switch (ls->t.token) {
+ case '=': fornum(ls, varname, line); break;
+ case ',': case TK_IN: forlist(ls, varname); break;
+ default: luaX_syntaxerror(ls, LUA_QL("=") " or " LUA_QL("in") " expected");
+ }
+ check_match(ls, TK_END, TK_FOR, line);
+ leaveblock(fs); /* loop scope (`break' jumps to this point) */
+}
+
+
+static int test_then_block (LexState *ls) {
+ /* test_then_block -> [IF | ELSEIF] cond THEN block */
+ int condexit;
+ luaX_next(ls); /* skip IF or ELSEIF */
+ condexit = cond(ls);
+ checknext(ls, TK_THEN);
+ block(ls); /* `then' part */
+ return condexit;
+}
+
+
+static void ifstat (LexState *ls, int line) {
+ /* ifstat -> IF cond THEN block {ELSEIF cond THEN block} [ELSE block] END */
+ FuncState *fs = ls->fs;
+ int flist;
+ int escapelist = NO_JUMP;
+ flist = test_then_block(ls); /* IF cond THEN block */
+ while (ls->t.token == TK_ELSEIF) {
+ luaK_concat(fs, &escapelist, luaK_jump(fs));
+ luaK_patchtohere(fs, flist);
+ flist = test_then_block(ls); /* ELSEIF cond THEN block */
+ }
+ if (ls->t.token == TK_ELSE) {
+ luaK_concat(fs, &escapelist, luaK_jump(fs));
+ luaK_patchtohere(fs, flist);
+ luaX_next(ls); /* skip ELSE (after patch, for correct line info) */
+ block(ls); /* `else' part */
+ }
+ else
+ luaK_concat(fs, &escapelist, flist);
+ luaK_patchtohere(fs, escapelist);
+ check_match(ls, TK_END, TK_IF, line);
+}
+
+
+static void localfunc (LexState *ls) {
+ expdesc v, b;
+ FuncState *fs = ls->fs;
+ new_localvar(ls, str_checkname(ls), 0);
+ init_exp(&v, VLOCAL, fs->freereg);
+ luaK_reserveregs(fs, 1);
+ adjustlocalvars(ls, 1);
+ body(ls, &b, 0, ls->linenumber);
+ luaK_storevar(fs, &v, &b);
+ /* debug information will only see the variable after this point! */
+ getlocvar(fs, fs->nactvar - 1).startpc = fs->pc;
+}
+
+
+static void localstat (LexState *ls) {
+ /* stat -> LOCAL NAME {`,' NAME} [`=' explist1] */
+ int nvars = 0;
+ int nexps;
+ expdesc e;
+ do {
+ new_localvar(ls, str_checkname(ls), nvars++);
+ } while (testnext(ls, ','));
+ if (testnext(ls, '='))
+ nexps = explist1(ls, &e);
+ else {
+ e.k = VVOID;
+ nexps = 0;
+ }
+ adjust_assign(ls, nvars, nexps, &e);
+ adjustlocalvars(ls, nvars);
+}
+
+
+static int funcname (LexState *ls, expdesc *v) {
+ /* funcname -> NAME {field} [`:' NAME] */
+ int needself = 0;
+ singlevar(ls, v);
+ while (ls->t.token == '.')
+ field(ls, v);
+ if (ls->t.token == ':') {
+ needself = 1;
+ field(ls, v);
+ }
+ return needself;
+}
+
+
+static void funcstat (LexState *ls, int line) {
+ /* funcstat -> FUNCTION funcname body */
+ int needself;
+ expdesc v, b;
+ luaX_next(ls); /* skip FUNCTION */
+ needself = funcname(ls, &v);
+ body(ls, &b, needself, line);
+ luaK_storevar(ls->fs, &v, &b);
+ luaK_fixline(ls->fs, line); /* definition `happens' in the first line */
+}
+
+
+static void exprstat (LexState *ls) {
+ /* stat -> func | assignment */
+ FuncState *fs = ls->fs;
+ struct LHS_assign v;
+ primaryexp(ls, &v.v);
+ if (v.v.k == VCALL) /* stat -> func */
+ SETARG_C(getcode(fs, &v.v), 1); /* call statement uses no results */
+ else { /* stat -> assignment */
+ v.prev = NULL;
+ assignment(ls, &v, 1);
+ }
+}
+
+
+static void retstat (LexState *ls) {
+ /* stat -> RETURN explist */
+ FuncState *fs = ls->fs;
+ expdesc e;
+ int first, nret; /* registers with returned values */
+ luaX_next(ls); /* skip RETURN */
+ if (block_follow(ls->t.token) || ls->t.token == ';')
+ first = nret = 0; /* return no values */
+ else {
+ nret = explist1(ls, &e); /* optional return values */
+ if (hasmultret(e.k)) {
+ luaK_setmultret(fs, &e);
+ if (e.k == VCALL && nret == 1) { /* tail call? */
+ SET_OPCODE(getcode(fs,&e), OP_TAILCALL);
+ lua_assert(GETARG_A(getcode(fs,&e)) == fs->nactvar);
+ }
+ first = fs->nactvar;
+ nret = LUA_MULTRET; /* return all values */
+ }
+ else {
+ if (nret == 1) /* only one single value? */
+ first = luaK_exp2anyreg(fs, &e);
+ else {
+ luaK_exp2nextreg(fs, &e); /* values must go to the `stack' */
+ first = fs->nactvar; /* return all `active' values */
+ lua_assert(nret == fs->freereg - first);
+ }
+ }
+ }
+ luaK_ret(fs, first, nret);
+}
+
+
+static int statement (LexState *ls) {
+ int line = ls->linenumber; /* may be needed for error messages */
+ switch (ls->t.token) {
+ case TK_IF: { /* stat -> ifstat */
+ ifstat(ls, line);
+ return 0;
+ }
+ case TK_WHILE: { /* stat -> whilestat */
+ whilestat(ls, line);
+ return 0;
+ }
+ case TK_DO: { /* stat -> DO block END */
+ luaX_next(ls); /* skip DO */
+ block(ls);
+ check_match(ls, TK_END, TK_DO, line);
+ return 0;
+ }
+ case TK_FOR: { /* stat -> forstat */
+ forstat(ls, line);
+ return 0;
+ }
+ case TK_REPEAT: { /* stat -> repeatstat */
+ repeatstat(ls, line);
+ return 0;
+ }
+ case TK_FUNCTION: {
+ funcstat(ls, line); /* stat -> funcstat */
+ return 0;
+ }
+ case TK_LOCAL: { /* stat -> localstat */
+ luaX_next(ls); /* skip LOCAL */
+ if (testnext(ls, TK_FUNCTION)) /* local function? */
+ localfunc(ls);
+ else
+ localstat(ls);
+ return 0;
+ }
+ case TK_RETURN: { /* stat -> retstat */
+ retstat(ls);
+ return 1; /* must be last statement */
+ }
+ case TK_BREAK: { /* stat -> breakstat */
+ luaX_next(ls); /* skip BREAK */
+ breakstat(ls);
+ return 1; /* must be last statement */
+ }
+ default: {
+ exprstat(ls);
+ return 0; /* to avoid warnings */
+ }
+ }
+}
+
+
+static void chunk (LexState *ls) {
+ /* chunk -> { stat [`;'] } */
+ int islast = 0;
+ enterlevel(ls);
+ while (!islast && !block_follow(ls->t.token)) {
+ islast = statement(ls);
+ testnext(ls, ';');
+ lua_assert(ls->fs->f->maxstacksize >= ls->fs->freereg &&
+ ls->fs->freereg >= ls->fs->nactvar);
+ ls->fs->freereg = ls->fs->nactvar; /* free registers */
+ }
+ leavelevel(ls);
+}
+
+/* }====================================================================== */
diff --git a/deps/lua/src/lparser.h b/deps/lua/src/lparser.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5e6e81d0d4bba0728a00cf6a82a388e339dc8b2
--- /dev/null
+++ b/deps/lua/src/lparser.h
@@ -0,0 +1,83 @@
+/*
+** $Id: lparser.h,v 1.56 2005/10/03 14:02:40 roberto Exp $
+** Lua Parser
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lparser_h
+#define lparser_h
+
+#include "llimits.h"
+#include "lobject.h"
+#include "ltable.h"
+#include "lzio.h"
+
+
+/*
+** Expression descriptor
+*/
+
+typedef enum {
+ VVOID, /* no value */
+ VNIL,
+ VTRUE,
+ VFALSE,
+ VK, /* info = index of constant in `k' */
+ VKNUM, /* nval = numerical value */
+ VLOCAL, /* info = local register */
+ VUPVAL, /* info = index of upvalue in `upvalues' */
+ VGLOBAL, /* info = index of table; aux = index of global name in `k' */
+ VINDEXED, /* info = table register; aux = index register (or `k') */
+ VJMP, /* info = instruction pc */
+ VRELOCABLE, /* info = instruction pc */
+ VNONRELOC, /* info = result register */
+ VCALL, /* info = instruction pc */
+ VVARARG /* info = instruction pc */
+} expkind;
+
+typedef struct expdesc {
+ expkind k;
+ union {
+ struct { int info, aux; } s;
+ lua_Number nval;
+ } u;
+ int t; /* patch list of `exit when true' */
+ int f; /* patch list of `exit when false' */
+} expdesc;
+
+
+typedef struct upvaldesc {
+ lu_byte k;
+ lu_byte info;
+} upvaldesc;
+
+
+struct BlockCnt; /* defined in lparser.c */
+
+
+/* state needed to generate code for a given function */
+typedef struct FuncState {
+ Proto *f; /* current function header */
+ Table *h; /* table to find (and reuse) elements in `k' */
+ struct FuncState *prev; /* enclosing function */
+ struct LexState *ls; /* lexical state */
+ struct lua_State *L; /* copy of the Lua state */
+ struct BlockCnt *bl; /* chain of current blocks */
+ int pc; /* next position to code (equivalent to `ncode') */
+ int lasttarget; /* `pc' of last `jump target' */
+ int jpc; /* list of pending jumps to `pc' */
+ int freereg; /* first free register */
+ int nk; /* number of elements in `k' */
+ int np; /* number of elements in `p' */
+ short nlocvars; /* number of elements in `locvars' */
+ lu_byte nactvar; /* number of active local variables */
+ upvaldesc upvalues[LUAI_MAXUPVALUES]; /* upvalues */
+ unsigned short actvar[LUAI_MAXVARS]; /* declared-variable stack */
+} FuncState;
+
+
+LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff,
+ const char *name);
+
+
+#endif
diff --git a/deps/lua/src/lstate.c b/deps/lua/src/lstate.c
new file mode 100644
index 0000000000000000000000000000000000000000..77e93fbdfa056a05c5de3b9ad09dee4e3e2e46f7
--- /dev/null
+++ b/deps/lua/src/lstate.c
@@ -0,0 +1,214 @@
+/*
+** $Id: lstate.c,v 2.35 2005/10/06 20:46:25 roberto Exp $
+** Global State
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stddef.h>
+
+#define lstate_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lgc.h"
+#include "llex.h"
+#include "lmem.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+
+
+#define state_size(x) (sizeof(x) + LUAI_EXTRASPACE)
+#define fromstate(l) (cast(lu_byte *, (l)) - LUAI_EXTRASPACE)
+#define tostate(l) (cast(lua_State *, cast(lu_byte *, l) + LUAI_EXTRASPACE))
+
+
+/*
+** Main thread combines a thread state and the global state
+*/
+typedef struct LG {
+ lua_State l;
+ global_State g;
+} LG;
+
+
+
+static void stack_init (lua_State *L1, lua_State *L) {
+ /* initialize CallInfo array */
+ L1->base_ci = luaM_newvector(L, BASIC_CI_SIZE, CallInfo);
+ L1->ci = L1->base_ci;
+ L1->size_ci = BASIC_CI_SIZE;
+ L1->end_ci = L1->base_ci + L1->size_ci - 1;
+ /* initialize stack array */
+ L1->stack = luaM_newvector(L, BASIC_STACK_SIZE + EXTRA_STACK, TValue);
+ L1->stacksize = BASIC_STACK_SIZE + EXTRA_STACK;
+ L1->top = L1->stack;
+ L1->stack_last = L1->stack+(L1->stacksize - EXTRA_STACK)-1;
+ /* initialize first ci */
+ L1->ci->func = L1->top;
+ setnilvalue(L1->top++); /* `function' entry for this `ci' */
+ L1->base = L1->ci->base = L1->top;
+ L1->ci->top = L1->top + LUA_MINSTACK;
+}
+
+
+static void freestack (lua_State *L, lua_State *L1) {
+ luaM_freearray(L, L1->base_ci, L1->size_ci, CallInfo);
+ luaM_freearray(L, L1->stack, L1->stacksize, TValue);
+}
+
+
+/*
+** open parts that may cause memory-allocation errors
+*/
+static void f_luaopen (lua_State *L, void *ud) {
+ global_State *g = G(L);
+ UNUSED(ud);
+ stack_init(L, L); /* init stack */
+ sethvalue(L, gt(L), luaH_new(L, 0, 2)); /* table of globals */
+ sethvalue(L, registry(L), luaH_new(L, 0, 2)); /* registry */
+ luaS_resize(L, MINSTRTABSIZE); /* initial size of string table */
+ luaT_init(L);
+ luaX_init(L);
+ luaS_fix(luaS_newliteral(L, MEMERRMSG));
+ g->GCthreshold = 4*g->totalbytes;
+}
+
+
+static void preinit_state (lua_State *L, global_State *g) {
+ G(L) = g;
+ L->stack = NULL;
+ L->stacksize = 0;
+ L->errorJmp = NULL;
+ L->hook = NULL;
+ L->hookmask = 0;
+ L->basehookcount = 0;
+ L->allowhook = 1;
+ resethookcount(L);
+ L->openupval = NULL;
+ L->size_ci = 0;
+ L->nCcalls = 0;
+ L->status = 0;
+ L->base_ci = L->ci = NULL;
+ L->savedpc = NULL;
+ L->errfunc = 0;
+ setnilvalue(gt(L));
+}
+
+
+static void close_state (lua_State *L) {
+ global_State *g = G(L);
+ luaF_close(L, L->stack); /* close all upvalues for this thread */
+ luaC_freeall(L); /* collect all objects */
+ lua_assert(g->rootgc == obj2gco(L));
+ lua_assert(g->strt.nuse == 0);
+ luaM_freearray(L, G(L)->strt.hash, G(L)->strt.size, TString *);
+ luaZ_freebuffer(L, &g->buff);
+ freestack(L, L);
+ lua_assert(g->totalbytes == sizeof(LG));
+ (*g->frealloc)(g->ud, fromstate(L), state_size(LG), 0);
+}
+
+
+lua_State *luaE_newthread (lua_State *L) {
+ lua_State *L1 = tostate(luaM_malloc(L, state_size(lua_State)));
+ luaC_link(L, obj2gco(L1), LUA_TTHREAD);
+ preinit_state(L1, G(L));
+ stack_init(L1, L); /* init stack */
+ setobj2n(L, gt(L1), gt(L)); /* share table of globals */
+ L1->hookmask = L->hookmask;
+ L1->basehookcount = L->basehookcount;
+ L1->hook = L->hook;
+ resethookcount(L1);
+ lua_assert(iswhite(obj2gco(L1)));
+ return L1;
+}
+
+
+void luaE_freethread (lua_State *L, lua_State *L1) {
+ luaF_close(L1, L1->stack); /* close all upvalues for this thread */
+ lua_assert(L1->openupval == NULL);
+ luai_userstatefree(L1);
+ freestack(L, L1);
+ luaM_freemem(L, fromstate(L1), state_size(lua_State));
+}
+
+
+LUA_API lua_State *lua_newstate (lua_Alloc f, void *ud) {
+ int i;
+ lua_State *L;
+ global_State *g;
+ void *l = (*f)(ud, NULL, 0, state_size(LG));
+ if (l == NULL) return NULL;
+ L = tostate(l);
+ g = &((LG *)L)->g;
+ L->next = NULL;
+ L->tt = LUA_TTHREAD;
+ g->currentwhite = bit2mask(WHITE0BIT, FIXEDBIT);
+ L->marked = luaC_white(g);
+ set2bits(L->marked, FIXEDBIT, SFIXEDBIT);
+ preinit_state(L, g);
+ g->frealloc = f;
+ g->ud = ud;
+ g->mainthread = L;
+ g->uvhead.u.l.prev = &g->uvhead;
+ g->uvhead.u.l.next = &g->uvhead;
+ g->GCthreshold = 0; /* mark it as unfinished state */
+ g->strt.size = 0;
+ g->strt.nuse = 0;
+ g->strt.hash = NULL;
+ setnilvalue(registry(L));
+ luaZ_initbuffer(L, &g->buff);
+ g->panic = NULL;
+ g->gcstate = GCSpause;
+ g->rootgc = obj2gco(L);
+ g->sweepstrgc = 0;
+ g->sweepgc = &g->rootgc;
+ g->gray = NULL;
+ g->grayagain = NULL;
+ g->weak = NULL;
+ g->tmudata = NULL;
+ g->totalbytes = sizeof(LG);
+ g->gcpause = LUAI_GCPAUSE;
+ g->gcstepmul = LUAI_GCMUL;
+ g->gcdept = 0;
+ for (i=0; i<NUM_TAGS; i++) g->mt[i] = NULL;
+ if (luaD_rawrunprotected(L, f_luaopen, NULL) != 0) {
+ /* memory allocation error: free partial state */
+ close_state(L);
+ L = NULL;
+ }
+ else
+ luai_userstateopen(L);
+ return L;
+}
+
+
+static void callallgcTM (lua_State *L, void *ud) {
+ UNUSED(ud);
+ luaC_callGCTM(L); /* call GC metamethods for all udata */
+}
+
+
+LUA_API void lua_close (lua_State *L) {
+ L = G(L)->mainthread; /* only the main thread can be closed */
+ luai_userstateclose(L);
+ lua_lock(L);
+ luaF_close(L, L->stack); /* close all upvalues for this thread */
+ luaC_separateudata(L, 1); /* separate udata that have GC metamethods */
+ L->errfunc = 0; /* no error function during GC metamethods */
+ do { /* repeat until no more errors */
+ L->ci = L->base_ci;
+ L->base = L->top = L->ci->base;
+ L->nCcalls = 0;
+ } while (luaD_rawrunprotected(L, callallgcTM, NULL) != 0);
+ lua_assert(G(L)->tmudata == NULL);
+ close_state(L);
+}
+
diff --git a/deps/lua/src/lstate.h b/deps/lua/src/lstate.h
new file mode 100644
index 0000000000000000000000000000000000000000..d296a4cab99e1cb1ec5f09bc0cd1bfc74e1cd5cc
--- /dev/null
+++ b/deps/lua/src/lstate.h
@@ -0,0 +1,168 @@
+/*
+** $Id: lstate.h,v 2.24 2006/02/06 18:27:59 roberto Exp $
+** Global State
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lstate_h
+#define lstate_h
+
+#include "lua.h"
+
+#include "lobject.h"
+#include "ltm.h"
+#include "lzio.h"
+
+
+
+struct lua_longjmp; /* defined in ldo.c */
+
+
+/* table of globals */
+#define gt(L) (&L->l_gt)
+
+/* registry */
+#define registry(L) (&G(L)->l_registry)
+
+
+/* extra stack space to handle TM calls and some other extras */
+#define EXTRA_STACK 5
+
+
+#define BASIC_CI_SIZE 8
+
+#define BASIC_STACK_SIZE (2*LUA_MINSTACK)
+
+
+
+typedef struct stringtable {
+ GCObject **hash;
+ lu_int32 nuse; /* number of elements */
+ int size;
+} stringtable;
+
+
+/*
+** information about a call
+*/
+typedef struct CallInfo {
+ StkId base; /* base for this function */
+ StkId func; /* function index in the stack */
+ StkId top; /* top for this function */
+ const Instruction *savedpc;
+ int nresults; /* expected number of results from this function */
+ int tailcalls; /* number of tail calls lost under this entry */
+} CallInfo;
+
+
+
+#define curr_func(L) (clvalue(L->ci->func))
+#define ci_func(ci) (clvalue((ci)->func))
+#define f_isLua(ci) (!ci_func(ci)->c.isC)
+#define isLua(ci) (ttisfunction((ci)->func) && f_isLua(ci))
+
+
+/*
+** `global state', shared by all threads of this state
+*/
+typedef struct global_State {
+ stringtable strt; /* hash table for strings */
+ lua_Alloc frealloc; /* function to reallocate memory */
+ void *ud; /* auxiliary data to `frealloc' */
+ lu_byte currentwhite;
+ lu_byte gcstate; /* state of garbage collector */
+ int sweepstrgc; /* position of sweep in `strt' */
+ GCObject *rootgc; /* list of all collectable objects */
+ GCObject **sweepgc; /* position of sweep in `rootgc' */
+ GCObject *gray; /* list of gray objects */
+ GCObject *grayagain; /* list of objects to be traversed atomically */
+ GCObject *weak; /* list of weak tables (to be cleared) */
+ GCObject *tmudata; /* last element of list of userdata to be GC */
+ Mbuffer buff; /* temporary buffer for string concatenation */
+ lu_mem GCthreshold;
+ lu_mem totalbytes; /* number of bytes currently allocated */
+ lu_mem estimate; /* an estimate of number of bytes actually in use */
+ lu_mem gcdept; /* how much GC is `behind schedule' */
+ int gcpause; /* size of pause between successive GCs */
+ int gcstepmul; /* GC `granularity' */
+ lua_CFunction panic; /* to be called in unprotected errors */
+ TValue l_registry;
+ struct lua_State *mainthread;
+ UpVal uvhead; /* head of double-linked list of all open upvalues */
+ struct Table *mt[NUM_TAGS]; /* metatables for basic types */
+ TString *tmname[TM_N]; /* array with tag-method names */
+} global_State;
+
+
+/*
+** `per thread' state
+*/
+struct lua_State {
+ CommonHeader;
+ lu_byte status;
+ StkId top; /* first free slot in the stack */
+ StkId base; /* base of current function */
+ global_State *l_G;
+ CallInfo *ci; /* call info for current function */
+ const Instruction *savedpc; /* `savedpc' of current function */
+ StkId stack_last; /* last free slot in the stack */
+ StkId stack; /* stack base */
+ CallInfo *end_ci; /* points after end of ci array*/
+ CallInfo *base_ci; /* array of CallInfo's */
+ int stacksize;
+ int size_ci; /* size of array `base_ci' */
+ unsigned short nCcalls; /* number of nested C calls */
+ lu_byte hookmask;
+ lu_byte allowhook;
+ int basehookcount;
+ int hookcount;
+ lua_Hook hook;
+ TValue l_gt; /* table of globals */
+ TValue env; /* temporary place for environments */
+ GCObject *openupval; /* list of open upvalues in this stack */
+ GCObject *gclist;
+ struct lua_longjmp *errorJmp; /* current error recover point */
+ ptrdiff_t errfunc; /* current error handling function (stack index) */
+};
+
+
+#define G(L) (L->l_G)
+
+
+/*
+** Union of all collectable objects
+*/
+union GCObject {
+ GCheader gch;
+ union TString ts;
+ union Udata u;
+ union Closure cl;
+ struct Table h;
+ struct Proto p;
+ struct UpVal uv;
+ struct lua_State th; /* thread */
+};
+
+
+/* macros to convert a GCObject into a specific value */
+#define rawgco2ts(o) check_exp((o)->gch.tt == LUA_TSTRING, &((o)->ts))
+#define gco2ts(o) (&rawgco2ts(o)->tsv)
+#define rawgco2u(o) check_exp((o)->gch.tt == LUA_TUSERDATA, &((o)->u))
+#define gco2u(o) (&rawgco2u(o)->uv)
+#define gco2cl(o) check_exp((o)->gch.tt == LUA_TFUNCTION, &((o)->cl))
+#define gco2h(o) check_exp((o)->gch.tt == LUA_TTABLE, &((o)->h))
+#define gco2p(o) check_exp((o)->gch.tt == LUA_TPROTO, &((o)->p))
+#define gco2uv(o) check_exp((o)->gch.tt == LUA_TUPVAL, &((o)->uv))
+#define ngcotouv(o) \
+ check_exp((o) == NULL || (o)->gch.tt == LUA_TUPVAL, &((o)->uv))
+#define gco2th(o) check_exp((o)->gch.tt == LUA_TTHREAD, &((o)->th))
+
+/* macro to convert any Lua object into a GCObject */
+#define obj2gco(v) (cast(GCObject *, (v)))
+
+
+LUAI_FUNC lua_State *luaE_newthread (lua_State *L);
+LUAI_FUNC void luaE_freethread (lua_State *L, lua_State *L1);
+
+#endif
+
diff --git a/deps/lua/src/lstring.c b/deps/lua/src/lstring.c
new file mode 100644
index 0000000000000000000000000000000000000000..4319930c96d07e61fe98b697b87c538280cf38c9
--- /dev/null
+++ b/deps/lua/src/lstring.c
@@ -0,0 +1,111 @@
+/*
+** $Id: lstring.c,v 2.8 2005/12/22 16:19:56 roberto Exp $
+** String table (keeps all strings handled by Lua)
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lstring_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lmem.h"
+#include "lobject.h"
+#include "lstate.h"
+#include "lstring.h"
+
+
+
+void luaS_resize (lua_State *L, int newsize) {
+ GCObject **newhash;
+ stringtable *tb;
+ int i;
+ if (G(L)->gcstate == GCSsweepstring)
+ return; /* cannot resize during GC traverse */
+ newhash = luaM_newvector(L, newsize, GCObject *);
+ tb = &G(L)->strt;
+ for (i=0; i<tb->size; i++) {
+ GCObject *p = tb->hash[i];
+ while (p) { /* for each node in the list */
+ GCObject *next = p->gch.next; /* save next */
+ unsigned int h = gco2ts(p)->hash;
+ int h1 = lmod(h, newsize); /* new position */
+ lua_assert(cast_int(h%newsize) == lmod(h, newsize));
+ p->gch.next = newhash[h1]; /* chain it */
+ newhash[h1] = p;
+ p = next;
+ }
+ }
+ luaM_freearray(L, tb->hash, tb->size, TString *);
+ tb->size = newsize;
+ tb->hash = newhash;
+}
+
+
+static TString *newlstr (lua_State *L, const char *str, size_t l,
+ unsigned int h) {
+ TString *ts;
+ stringtable *tb;
+ if (l+1 > (MAX_SIZET - sizeof(TString))/sizeof(char))
+ luaM_toobig(L);
+ ts = cast(TString *, luaM_malloc(L, (l+1)*sizeof(char)+sizeof(TString)));
+ ts->tsv.len = l;
+ ts->tsv.hash = h;
+ ts->tsv.marked = luaC_white(G(L));
+ ts->tsv.tt = LUA_TSTRING;
+ ts->tsv.reserved = 0;
+ memcpy(ts+1, str, l*sizeof(char));
+ ((char *)(ts+1))[l] = '\0'; /* ending 0 */
+ tb = &G(L)->strt;
+ h = lmod(h, tb->size);
+ ts->tsv.next = tb->hash[h]; /* chain new entry */
+ tb->hash[h] = obj2gco(ts);
+ tb->nuse++;
+ if (tb->nuse > cast(lu_int32, tb->size) && tb->size <= MAX_INT/2)
+ luaS_resize(L, tb->size*2); /* too crowded */
+ return ts;
+}
+
+
+TString *luaS_newlstr (lua_State *L, const char *str, size_t l) {
+ GCObject *o;
+ unsigned int h = cast(unsigned int, l); /* seed */
+ size_t step = (l>>5)+1; /* if string is too long, don't hash all its chars */
+ size_t l1;
+ for (l1=l; l1>=step; l1-=step) /* compute hash */
+ h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1]));
+ for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)];
+ o != NULL;
+ o = o->gch.next) {
+ TString *ts = rawgco2ts(o);
+ if (ts->tsv.len == l && (memcmp(str, getstr(ts), l) == 0)) {
+ /* string may be dead */
+ if (isdead(G(L), o)) changewhite(o);
+ return ts;
+ }
+ }
+ return newlstr(L, str, l, h); /* not found */
+}
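+
+/*
+** Editor's note (illustration only): long strings are hashed by sampling.
+** For l == 1000 the step is (1000>>5)+1 == 32, so only about one character
+** in every 32 is mixed into the hash, keeping luaS_newlstr cheap for big
+** strings at the cost of more potential collisions.
+*/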
+
+
+Udata *luaS_newudata (lua_State *L, size_t s, Table *e) {
+ Udata *u;
+ if (s > MAX_SIZET - sizeof(Udata))
+ luaM_toobig(L);
+ u = cast(Udata *, luaM_malloc(L, s + sizeof(Udata)));
+ u->uv.marked = luaC_white(G(L)); /* is not finalized */
+ u->uv.tt = LUA_TUSERDATA;
+ u->uv.len = s;
+ u->uv.metatable = NULL;
+ u->uv.env = e;
+ /* chain it on udata list (after main thread) */
+ u->uv.next = G(L)->mainthread->next;
+ G(L)->mainthread->next = obj2gco(u);
+ return u;
+}
+
diff --git a/deps/lua/src/lstring.h b/deps/lua/src/lstring.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d2e91ea13a31bcfbdbee4c63dc17d1e99e9f8e4
--- /dev/null
+++ b/deps/lua/src/lstring.h
@@ -0,0 +1,31 @@
+/*
+** $Id: lstring.h,v 1.43 2005/04/25 19:24:10 roberto Exp $
+** String table (keep all strings handled by Lua)
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lstring_h
+#define lstring_h
+
+
+#include "lgc.h"
+#include "lobject.h"
+#include "lstate.h"
+
+
+#define sizestring(s) (sizeof(union TString)+((s)->len+1)*sizeof(char))
+
+#define sizeudata(u) (sizeof(union Udata)+(u)->len)
+
+#define luaS_new(L, s) (luaS_newlstr(L, s, strlen(s)))
+#define luaS_newliteral(L, s) (luaS_newlstr(L, "" s, \
+ (sizeof(s)/sizeof(char))-1))
+
+#define luaS_fix(s) l_setbit((s)->tsv.marked, FIXEDBIT)
+
+LUAI_FUNC void luaS_resize (lua_State *L, int newsize);
+LUAI_FUNC Udata *luaS_newudata (lua_State *L, size_t s, Table *e);
+LUAI_FUNC TString *luaS_newlstr (lua_State *L, const char *str, size_t l);
+
+
+#endif
diff --git a/deps/lua/src/lstrlib.c b/deps/lua/src/lstrlib.c
new file mode 100644
index 0000000000000000000000000000000000000000..84478fd106c6e96203d68db4e642302d1fba745a
--- /dev/null
+++ b/deps/lua/src/lstrlib.c
@@ -0,0 +1,863 @@
+/*
+** $Id: lstrlib.c,v 1.130 2005/12/29 15:32:11 roberto Exp $
+** Standard library for string operations and pattern-matching
+** See Copyright Notice in lua.h
+*/
+
+
+#include <ctype.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lstrlib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+/* macro to `unsign' a character */
+#define uchar(c) ((unsigned char)(c))
+
+
+
+static int str_len (lua_State *L) {
+ size_t l;
+ luaL_checklstring(L, 1, &l);
+ lua_pushinteger(L, l);
+ return 1;
+}
+
+
+static ptrdiff_t posrelat (ptrdiff_t pos, size_t len) {
+ /* relative string position: negative means back from end */
+ return (pos>=0) ? pos : (ptrdiff_t)len+pos+1;
+}
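+
+/*
+** Editor's note (illustration only): with len == 5, posrelat(2, 5) == 2,
+** posrelat(-1, 5) == 5 and posrelat(-5, 5) == 1, which is why
+** string.sub(s, -1) selects the last character of s.
+*/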
+
+
+static int str_sub (lua_State *L) {
+ size_t l;
+ const char *s = luaL_checklstring(L, 1, &l);
+ ptrdiff_t start = posrelat(luaL_checkinteger(L, 2), l);
+ ptrdiff_t end = posrelat(luaL_optinteger(L, 3, -1), l);
+ if (start < 1) start = 1;
+ if (end > (ptrdiff_t)l) end = (ptrdiff_t)l;
+ if (start <= end)
+ lua_pushlstring(L, s+start-1, end-start+1);
+ else lua_pushliteral(L, "");
+ return 1;
+}
+
+
+static int str_reverse (lua_State *L) {
+ size_t l;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ luaL_buffinit(L, &b);
+ while (l--) luaL_addchar(&b, s[l]);
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_lower (lua_State *L) {
+ size_t l;
+ size_t i;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ luaL_buffinit(L, &b);
+ for (i=0; i<l; i++)
+ luaL_addchar(&b, tolower(uchar(s[i])));
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_upper (lua_State *L) {
+ size_t l;
+ size_t i;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ luaL_buffinit(L, &b);
+ for (i=0; i<l; i++)
+ luaL_addchar(&b, toupper(uchar(s[i])));
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_rep (lua_State *L) {
+ size_t l;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ int n = luaL_checkint(L, 2);
+ luaL_buffinit(L, &b);
+ while (n-- > 0)
+ luaL_addlstring(&b, s, l);
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_byte (lua_State *L) {
+ size_t l;
+ const char *s = luaL_checklstring(L, 1, &l);
+ ptrdiff_t posi = posrelat(luaL_optinteger(L, 2, 1), l);
+ ptrdiff_t pose = posrelat(luaL_optinteger(L, 3, posi), l);
+ int n, i;
+ if (posi <= 0) posi = 1;
+ if ((size_t)pose > l) pose = l;
+ if (posi > pose) return 0; /* empty interval; return no values */
+ n = (int)(pose - posi + 1);
+ if (posi + n <= pose) /* overflow? */
+ luaL_error(L, "string slice too long");
+ luaL_checkstack(L, n, "string slice too long");
+ for (i=0; i<n; i++)
+ lua_pushinteger(L, uchar(s[posi+i-1]));
+ return n;
+}
+
+
+static int str_char (lua_State *L) {
+ int n = lua_gettop(L); /* number of arguments */
+ int i;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ for (i=1; i<=n; i++) {
+ int c = luaL_checkint(L, i);
+ luaL_argcheck(L, uchar(c) == c, i, "invalid value");
+ luaL_addchar(&b, uchar(c));
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int writer (lua_State *L, const void* b, size_t size, void* B) {
+ (void)L;
+ luaL_addlstring((luaL_Buffer*) B, (const char *)b, size);
+ return 0;
+}
+
+
+static int str_dump (lua_State *L) {
+ luaL_Buffer b;
+ luaL_checktype(L, 1, LUA_TFUNCTION);
+ lua_settop(L, 1);
+ luaL_buffinit(L,&b);
+ if (lua_dump(L, writer, &b) != 0)
+ luaL_error(L, "unable to dump given function");
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+
+/*
+** {======================================================
+** PATTERN MATCHING
+** =======================================================
+*/
+
+
+#define CAP_UNFINISHED (-1)
+#define CAP_POSITION (-2)
+
+typedef struct MatchState {
+ const char *src_init; /* init of source string */
+ const char *src_end; /* end (`\0') of source string */
+ lua_State *L;
+ int level; /* total number of captures (finished or unfinished) */
+ struct {
+ const char *init;
+ ptrdiff_t len;
+ } capture[LUA_MAXCAPTURES];
+} MatchState;
+
+
+#define L_ESC '%'
+#define SPECIALS "^$*+?.([%-"
+
+
+static int check_capture (MatchState *ms, int l) {
+ l -= '1';
+ if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
+ return luaL_error(ms->L, "invalid capture index");
+ return l;
+}
+
+
+static int capture_to_close (MatchState *ms) {
+ int level = ms->level;
+ for (level--; level>=0; level--)
+ if (ms->capture[level].len == CAP_UNFINISHED) return level;
+ return luaL_error(ms->L, "invalid pattern capture");
+}
+
+
+static const char *classend (MatchState *ms, const char *p) {
+ switch (*p++) {
+ case L_ESC: {
+ if (*p == '\0')
+ luaL_error(ms->L, "malformed pattern (ends with " LUA_QL("%%") ")");
+ return p+1;
+ }
+ case '[': {
+ if (*p == '^') p++;
+ do { /* look for a `]' */
+ if (*p == '\0')
+ luaL_error(ms->L, "malformed pattern (missing " LUA_QL("]") ")");
+ if (*(p++) == L_ESC && *p != '\0')
+ p++; /* skip escapes (e.g. `%]') */
+ } while (*p != ']');
+ return p+1;
+ }
+ default: {
+ return p;
+ }
+ }
+}
+
+
+static int match_class (int c, int cl) {
+ int res;
+ switch (tolower(cl)) {
+ case 'a' : res = isalpha(c); break;
+ case 'c' : res = iscntrl(c); break;
+ case 'd' : res = isdigit(c); break;
+ case 'l' : res = islower(c); break;
+ case 'p' : res = ispunct(c); break;
+ case 's' : res = isspace(c); break;
+ case 'u' : res = isupper(c); break;
+ case 'w' : res = isalnum(c); break;
+ case 'x' : res = isxdigit(c); break;
+ case 'z' : res = (c == 0); break;
+ default: return (cl == c);
+ }
+ return (islower(cl) ? res : !res);
+}
+
+
+static int matchbracketclass (int c, const char *p, const char *ec) {
+ int sig = 1;
+ if (*(p+1) == '^') {
+ sig = 0;
+ p++; /* skip the `^' */
+ }
+ while (++p < ec) {
+ if (*p == L_ESC) {
+ p++;
+ if (match_class(c, uchar(*p)))
+ return sig;
+ }
+ else if ((*(p+1) == '-') && (p+2 < ec)) {
+ p+=2;
+ if (uchar(*(p-2)) <= c && c <= uchar(*p))
+ return sig;
+ }
+ else if (uchar(*p) == c) return sig;
+ }
+ return !sig;
+}
+
+
+static int singlematch (int c, const char *p, const char *ep) {
+ switch (*p) {
+ case '.': return 1; /* matches any char */
+ case L_ESC: return match_class(c, uchar(*(p+1)));
+ case '[': return matchbracketclass(c, p, ep-1);
+ default: return (uchar(*p) == c);
+ }
+}
+
+
+static const char *match (MatchState *ms, const char *s, const char *p);
+
+
+static const char *matchbalance (MatchState *ms, const char *s,
+ const char *p) {
+ if (*p == 0 || *(p+1) == 0)
+ luaL_error(ms->L, "unbalanced pattern");
+ if (*s != *p) return NULL;
+ else {
+ int b = *p;
+ int e = *(p+1);
+ int cont = 1;
+ while (++s < ms->src_end) {
+ if (*s == e) {
+ if (--cont == 0) return s+1;
+ }
+ else if (*s == b) cont++;
+ }
+ }
+ return NULL; /* string ends out of balance */
+}
+
+
+static const char *max_expand (MatchState *ms, const char *s,
+ const char *p, const char *ep) {
+ ptrdiff_t i = 0; /* counts maximum expand for item */
+ while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
+ i++;
+ /* keeps trying to match with the maximum repetitions */
+ while (i>=0) {
+ const char *res = match(ms, (s+i), ep+1);
+ if (res) return res;
+ i--; /* else didn't match; reduce 1 repetition to try again */
+ }
+ return NULL;
+}
+
+
+static const char *min_expand (MatchState *ms, const char *s,
+ const char *p, const char *ep) {
+ for (;;) {
+ const char *res = match(ms, s, ep+1);
+ if (res != NULL)
+ return res;
+ else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
+ s++; /* try with one more repetition */
+ else return NULL;
+ }
+}
+
+
+static const char *start_capture (MatchState *ms, const char *s,
+ const char *p, int what) {
+ const char *res;
+ int level = ms->level;
+ if (level >= LUA_MAXCAPTURES) luaL_error(ms->L, "too many captures");
+ ms->capture[level].init = s;
+ ms->capture[level].len = what;
+ ms->level = level+1;
+ if ((res=match(ms, s, p)) == NULL) /* match failed? */
+ ms->level--; /* undo capture */
+ return res;
+}
+
+
+static const char *end_capture (MatchState *ms, const char *s,
+ const char *p) {
+ int l = capture_to_close(ms);
+ const char *res;
+ ms->capture[l].len = s - ms->capture[l].init; /* close capture */
+ if ((res = match(ms, s, p)) == NULL) /* match failed? */
+ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
+ return res;
+}
+
+
+static const char *match_capture (MatchState *ms, const char *s, int l) {
+ size_t len;
+ l = check_capture(ms, l);
+ len = ms->capture[l].len;
+ if ((size_t)(ms->src_end-s) >= len &&
+ memcmp(ms->capture[l].init, s, len) == 0)
+ return s+len;
+ else return NULL;
+}
+
+
+static const char *match (MatchState *ms, const char *s, const char *p) {
+ init: /* using goto's to optimize tail recursion */
+ switch (*p) {
+ case '(': { /* start capture */
+ if (*(p+1) == ')') /* position capture? */
+ return start_capture(ms, s, p+2, CAP_POSITION);
+ else
+ return start_capture(ms, s, p+1, CAP_UNFINISHED);
+ }
+ case ')': { /* end capture */
+ return end_capture(ms, s, p+1);
+ }
+ case L_ESC: {
+ switch (*(p+1)) {
+ case 'b': { /* balanced string? */
+ s = matchbalance(ms, s, p+2);
+ if (s == NULL) return NULL;
+ p+=4; goto init; /* else return match(ms, s, p+4); */
+ }
+ case 'f': { /* frontier? */
+ const char *ep; char previous;
+ p += 2;
+ if (*p != '[')
+ luaL_error(ms->L, "missing " LUA_QL("[") " after "
+ LUA_QL("%%f") " in pattern");
+ ep = classend(ms, p); /* points to what is next */
+ previous = (s == ms->src_init) ? '\0' : *(s-1);
+ if (matchbracketclass(uchar(previous), p, ep-1) ||
+ !matchbracketclass(uchar(*s), p, ep-1)) return NULL;
+ p=ep; goto init; /* else return match(ms, s, ep); */
+ }
+ default: {
+ if (isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
+ s = match_capture(ms, s, uchar(*(p+1)));
+ if (s == NULL) return NULL;
+ p+=2; goto init; /* else return match(ms, s, p+2) */
+ }
+ goto dflt; /* case default */
+ }
+ }
+ }
+ case '\0': { /* end of pattern */
+ return s; /* match succeeded */
+ }
+ case '$': {
+ if (*(p+1) == '\0') /* is the `$' the last char in pattern? */
+ return (s == ms->src_end) ? s : NULL; /* check end of string */
+ else goto dflt;
+ }
+ default: dflt: { /* it is a pattern item */
+ const char *ep = classend(ms, p); /* points to what is next */
+ int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
+ switch (*ep) {
+ case '?': { /* optional */
+ const char *res;
+ if (m && ((res=match(ms, s+1, ep+1)) != NULL))
+ return res;
+ p=ep+1; goto init; /* else return match(ms, s, ep+1); */
+ }
+ case '*': { /* 0 or more repetitions */
+ return max_expand(ms, s, p, ep);
+ }
+ case '+': { /* 1 or more repetitions */
+ return (m ? max_expand(ms, s+1, p, ep) : NULL);
+ }
+ case '-': { /* 0 or more repetitions (minimum) */
+ return min_expand(ms, s, p, ep);
+ }
+ default: {
+ if (!m) return NULL;
+ s++; p=ep; goto init; /* else return match(ms, s+1, ep); */
+ }
+ }
+ }
+ }
+}
+
+
+
+static const char *lmemfind (const char *s1, size_t l1,
+ const char *s2, size_t l2) {
+ if (l2 == 0) return s1; /* empty strings are everywhere */
+ else if (l2 > l1) return NULL; /* avoids a negative `l1' */
+ else {
+ const char *init; /* to search for a `*s2' inside `s1' */
+ l2--; /* 1st char will be checked by `memchr' */
+ l1 = l1-l2; /* `s2' cannot be found after that */
+ while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
+ init++; /* 1st char is already checked */
+ if (memcmp(init, s2+1, l2) == 0)
+ return init-1;
+ else { /* correct `l1' and `s1' to try again */
+ l1 -= init-s1;
+ s1 = init;
+ }
+ }
+ return NULL; /* not found */
+ }
+}
+
+
+static void push_onecapture (MatchState *ms, int i, const char *s,
+ const char *e) {
+ if (i >= ms->level) {
+ if (i == 0) /* ms->level == 0, too */
+ lua_pushlstring(ms->L, s, e - s); /* add whole match */
+ else
+ luaL_error(ms->L, "invalid capture index");
+ }
+ else {
+ ptrdiff_t l = ms->capture[i].len;
+ if (l == CAP_UNFINISHED) luaL_error(ms->L, "unfinished capture");
+ if (l == CAP_POSITION)
+ lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
+ else
+ lua_pushlstring(ms->L, ms->capture[i].init, l);
+ }
+}
+
+
+static int push_captures (MatchState *ms, const char *s, const char *e) {
+ int i;
+ int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
+ luaL_checkstack(ms->L, nlevels, "too many captures");
+ for (i = 0; i < nlevels; i++)
+ push_onecapture(ms, i, s, e);
+ return nlevels; /* number of strings pushed */
+}
+
+
+static int str_find_aux (lua_State *L, int find) {
+ size_t l1, l2;
+ const char *s = luaL_checklstring(L, 1, &l1);
+ const char *p = luaL_checklstring(L, 2, &l2);
+ ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
+ if (init < 0) init = 0;
+ else if ((size_t)(init) > l1) init = (ptrdiff_t)l1;
+ if (find && (lua_toboolean(L, 4) || /* explicit request? */
+ strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
+ /* do a plain search */
+ const char *s2 = lmemfind(s+init, l1-init, p, l2);
+ if (s2) {
+ lua_pushinteger(L, s2-s+1);
+ lua_pushinteger(L, s2-s+l2);
+ return 2;
+ }
+ }
+ else {
+ MatchState ms;
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ const char *s1=s+init;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+l1;
+ do {
+ const char *res;
+ ms.level = 0;
+ if ((res=match(&ms, s1, p)) != NULL) {
+ if (find) {
+ lua_pushinteger(L, s1-s+1); /* start */
+ lua_pushinteger(L, res-s); /* end */
+ return push_captures(&ms, NULL, 0) + 2;
+ }
+ else
+ return push_captures(&ms, s1, res);
+ }
+ } while (s1++ < ms.src_end && !anchor);
+ }
+ lua_pushnil(L); /* not found */
+ return 1;
+}
+
+
+static int str_find (lua_State *L) {
+ return str_find_aux(L, 1);
+}
+
+
+static int str_match (lua_State *L) {
+ return str_find_aux(L, 0);
+}
+
+
+static int gmatch_aux (lua_State *L) {
+ MatchState ms;
+ size_t ls;
+ const char *s = lua_tolstring(L, lua_upvalueindex(1), &ls);
+ const char *p = lua_tostring(L, lua_upvalueindex(2));
+ const char *src;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+ls;
+ for (src = s + (size_t)lua_tointeger(L, lua_upvalueindex(3));
+ src <= ms.src_end;
+ src++) {
+ const char *e;
+ ms.level = 0;
+ if ((e = match(&ms, src, p)) != NULL) {
+ lua_Integer newstart = e-s;
+ if (e == src) newstart++; /* empty match? go at least one position */
+ lua_pushinteger(L, newstart);
+ lua_replace(L, lua_upvalueindex(3));
+ return push_captures(&ms, src, e);
+ }
+ }
+ return 0; /* not found */
+}
+
+
+static int gmatch (lua_State *L) {
+ luaL_checkstring(L, 1);
+ luaL_checkstring(L, 2);
+ lua_settop(L, 2);
+ lua_pushinteger(L, 0);
+ lua_pushcclosure(L, gmatch_aux, 3);
+ return 1;
+}
+
+
+static int gfind_nodef (lua_State *L) {
+ return luaL_error(L, LUA_QL("string.gfind") " was renamed to "
+ LUA_QL("string.gmatch"));
+}
+
+
+static void add_s (MatchState *ms, luaL_Buffer *b, const char *s,
+ const char *e) {
+ size_t l, i;
+ const char *news = lua_tolstring(ms->L, 3, &l);
+ for (i = 0; i < l; i++) {
+ if (news[i] != L_ESC)
+ luaL_addchar(b, news[i]);
+ else {
+ i++; /* skip ESC */
+ if (!isdigit(uchar(news[i])))
+ luaL_addchar(b, news[i]);
+ else if (news[i] == '0')
+ luaL_addlstring(b, s, e - s);
+ else {
+ push_onecapture(ms, news[i] - '1', s, e);
+ luaL_addvalue(b); /* add capture to accumulated result */
+ }
+ }
+ }
+}
+
+
+static void add_value (MatchState *ms, luaL_Buffer *b, const char *s,
+ const char *e) {
+ lua_State *L = ms->L;
+ switch (lua_type(L, 3)) {
+ case LUA_TNUMBER:
+ case LUA_TSTRING: {
+ add_s(ms, b, s, e);
+ return;
+ }
+ case LUA_TFUNCTION: {
+ int n;
+ lua_pushvalue(L, 3);
+ n = push_captures(ms, s, e);
+ lua_call(L, n, 1);
+ break;
+ }
+ case LUA_TTABLE: {
+ push_onecapture(ms, 0, s, e);
+ lua_gettable(L, 3);
+ break;
+ }
+ default: {
+ luaL_argerror(L, 3, "string/function/table expected");
+ return;
+ }
+ }
+ if (!lua_toboolean(L, -1)) { /* nil or false? */
+ lua_pop(L, 1);
+ lua_pushlstring(L, s, e - s); /* keep original text */
+ }
+ else if (!lua_isstring(L, -1))
+ luaL_error(L, "invalid replacement value (a %s)", luaL_typename(L, -1));
+ luaL_addvalue(b); /* add result to accumulator */
+}
+
+
+static int str_gsub (lua_State *L) {
+ size_t srcl;
+ const char *src = luaL_checklstring(L, 1, &srcl);
+ const char *p = luaL_checkstring(L, 2);
+ int max_s = luaL_optint(L, 4, srcl+1);
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ int n = 0;
+ MatchState ms;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ ms.L = L;
+ ms.src_init = src;
+ ms.src_end = src+srcl;
+ while (n < max_s) {
+ const char *e;
+ ms.level = 0;
+ e = match(&ms, src, p);
+ if (e) {
+ n++;
+ add_value(&ms, &b, src, e);
+ }
+ if (e && e>src) /* non empty match? */
+ src = e; /* skip it */
+ else if (src < ms.src_end)
+ luaL_addchar(&b, *src++);
+ else break;
+ if (anchor) break;
+ }
+ luaL_addlstring(&b, src, ms.src_end-src);
+ luaL_pushresult(&b);
+ lua_pushinteger(L, n); /* number of substitutions */
+ return 2;
+}
+
+/* }====================================================== */
+
+
+/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
+#define MAX_ITEM 512
+/* valid flags in a format specification */
+#define FLAGS "-+ #0"
+/*
+** maximum size of each format specification (such as '%-099.99d')
+** (+10 accounts for %99.99x plus margin of error)
+*/
+#define MAX_FORMAT (sizeof(FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
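+
+/*
+** Editor's worked example (assumes the default LUA_INTFRMLEN of "l" from
+** luaconf.h): sizeof(FLAGS) == 6 and sizeof(LUA_INTFRMLEN) == 2, so
+** MAX_FORMAT == 18, comfortably above the 11 bytes needed for a spec such
+** as "%-099.99ld" (10 characters plus the terminating '\0').
+*/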
+
+
+static void addquoted (lua_State *L, luaL_Buffer *b, int arg) {
+ size_t l;
+ const char *s = luaL_checklstring(L, arg, &l);
+ luaL_addchar(b, '"');
+ while (l--) {
+ switch (*s) {
+ case '"': case '\\': case '\n': {
+ luaL_addchar(b, '\\');
+ luaL_addchar(b, *s);
+ break;
+ }
+ case '\0': {
+ luaL_addlstring(b, "\\000", 4);
+ break;
+ }
+ default: {
+ luaL_addchar(b, *s);
+ break;
+ }
+ }
+ s++;
+ }
+ luaL_addchar(b, '"');
+}
+
+static const char *scanformat (lua_State *L, const char *strfrmt, char *form) {
+ const char *p = strfrmt;
+ while (strchr(FLAGS, *p)) p++; /* skip flags */
+ if ((size_t)(p - strfrmt) >= sizeof(FLAGS))
+ luaL_error(L, "invalid format (repeated flags)");
+ if (isdigit(uchar(*p))) p++; /* skip width */
+ if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ if (*p == '.') {
+ p++;
+ if (isdigit(uchar(*p))) p++; /* skip precision */
+ if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ }
+ if (isdigit(uchar(*p)))
+ luaL_error(L, "invalid format (width or precision too long)");
+ *(form++) = '%';
+ strncpy(form, strfrmt, p - strfrmt + 1);
+ form += p - strfrmt + 1;
+ *form = '\0';
+ return p;
+}
+
+
+static void addintlen (char *form) {
+ size_t l = strlen(form);
+ char spec = form[l - 1];
+ strcpy(form + l - 1, LUA_INTFRMLEN);
+ form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
+ form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
+}
+
+
+static int str_format (lua_State *L) {
+ int arg = 1;
+ size_t sfl;
+ const char *strfrmt = luaL_checklstring(L, arg, &sfl);
+ const char *strfrmt_end = strfrmt+sfl;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while (strfrmt < strfrmt_end) {
+ if (*strfrmt != L_ESC)
+ luaL_addchar(&b, *strfrmt++);
+ else if (*++strfrmt == L_ESC)
+ luaL_addchar(&b, *strfrmt++); /* %% */
+ else { /* format item */
+ char form[MAX_FORMAT]; /* to store the format (`%...') */
+ char buff[MAX_ITEM]; /* to store the formatted item */
+ arg++;
+ strfrmt = scanformat(L, strfrmt, form);
+ switch (*strfrmt++) {
+ case 'c': {
+ sprintf(buff, form, (int)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'd': case 'i': {
+ addintlen(form);
+ sprintf(buff, form, (LUA_INTFRM_T)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'o': case 'u': case 'x': case 'X': {
+ addintlen(form);
+ sprintf(buff, form, (unsigned LUA_INTFRM_T)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'e': case 'E': case 'f':
+ case 'g': case 'G': {
+ sprintf(buff, form, (double)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'q': {
+ addquoted(L, &b, arg);
+ continue; /* skip the 'addsize' at the end */
+ }
+ case 's': {
+ size_t l;
+ const char *s = luaL_checklstring(L, arg, &l);
+ if (!strchr(form, '.') && l >= 100) {
+ /* no precision and string is too long to be formatted;
+ keep original string */
+ lua_pushvalue(L, arg);
+ luaL_addvalue(&b);
+ continue; /* skip the `addsize' at the end */
+ }
+ else {
+ sprintf(buff, form, s);
+ break;
+ }
+ }
+ default: { /* also treat cases `pnLlh' */
+ return luaL_error(L, "invalid option to " LUA_QL("format"));
+ }
+ }
+ luaL_addlstring(&b, buff, strlen(buff));
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static const luaL_Reg strlib[] = {
+ {"byte", str_byte},
+ {"char", str_char},
+ {"dump", str_dump},
+ {"find", str_find},
+ {"format", str_format},
+ {"gfind", gfind_nodef},
+ {"gmatch", gmatch},
+ {"gsub", str_gsub},
+ {"len", str_len},
+ {"lower", str_lower},
+ {"match", str_match},
+ {"rep", str_rep},
+ {"reverse", str_reverse},
+ {"sub", str_sub},
+ {"upper", str_upper},
+ {NULL, NULL}
+};
+
+
+static void createmetatable (lua_State *L) {
+ lua_createtable(L, 0, 1); /* create metatable for strings */
+ lua_pushliteral(L, ""); /* dummy string */
+ lua_pushvalue(L, -2);
+ lua_setmetatable(L, -2); /* set string metatable */
+ lua_pop(L, 1); /* pop dummy string */
+ lua_pushvalue(L, -2); /* string library... */
+ lua_setfield(L, -2, "__index"); /* ...is the __index metamethod */
+ lua_pop(L, 1); /* pop metatable */
+}
+
+
+/*
+** Open string library
+*/
+LUALIB_API int luaopen_string (lua_State *L) {
+ luaL_register(L, LUA_STRLIBNAME, strlib);
+#if defined(LUA_COMPAT_GFIND)
+ lua_getfield(L, -1, "gmatch");
+ lua_setfield(L, -2, "gfind");
+#endif
+ createmetatable(L);
+ return 1;
+}
+
diff --git a/deps/lua/src/ltable.c b/deps/lua/src/ltable.c
new file mode 100644
index 0000000000000000000000000000000000000000..bc91cacd637fe4563637f8c02740dc57a9cf50de
--- /dev/null
+++ b/deps/lua/src/ltable.c
@@ -0,0 +1,588 @@
+/*
+** $Id: ltable.c,v 2.32 2006/01/18 11:49:02 roberto Exp $
+** Lua tables (hash)
+** See Copyright Notice in lua.h
+*/
+
+
+/*
+** Implementation of tables (aka arrays, objects, or hash tables).
+** Tables keep their elements in two parts: an array part and a hash part.
+** Non-negative integer keys are all candidates to be kept in the array
+** part. The actual size of the array is the largest `n' such that at
+** least half the slots between 0 and n are in use.
+** Hash uses a mix of chained scatter table with Brent's variation.
+** A main invariant of these tables is that, if an element is not
+** in its main position (i.e. the `original' position that its hash gives
+** to it), then the colliding element is in its own main position.
+** Hence even when the load factor reaches 100%, performance remains good.
+*/
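+
+/*
+** Illustrative note (editor's sketch, not part of the original sources):
+** for a table holding the keys 1, 2, 3 and 100, the array part is sized 4,
+** because 3 of the first 4 slots are in use (more than half), whereas a
+** size of 128 would leave only 4 of 128 slots used; key 100 and any
+** non-integer keys therefore go to the hash part.
+*/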
+
+#include <math.h>
+#include <string.h>
+
+#define ltable_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lgc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lstate.h"
+#include "ltable.h"
+
+
+/*
+** max size of array part is 2^MAXBITS
+*/
+#if LUAI_BITSINT > 26
+#define MAXBITS 26
+#else
+#define MAXBITS (LUAI_BITSINT-2)
+#endif
+
+#define MAXASIZE (1 << MAXBITS)
+
+
+#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
+
+#define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
+#define hashboolean(t,p) hashpow2(t, p)
+
+
+/*
+** for some types, it is better to avoid modulus by power of 2, as
+** they tend to have many 2 factors.
+*/
+#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
+
+
+#define hashpointer(t,p) hashmod(t, IntPoint(p))
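+
+/*
+** Illustrative note (not from the original sources): pointers are usually
+** aligned, so with a power-of-2 table size such as 16 the addresses 0x1000,
+** 0x1010 and 0x1020 would all fall into bucket 0; with the odd modulus
+** (16-1)|1 == 15 used by hashmod they land in buckets 1, 2 and 3.
+*/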
+
+
+/*
+** number of ints inside a lua_Number
+*/
+#define numints cast_int(sizeof(lua_Number)/sizeof(int))
+
+
+
+#define dummynode (&dummynode_)
+
+static const Node dummynode_ = {
+ {{NULL}, LUA_TNIL}, /* value */
+ {{{NULL}, LUA_TNIL, NULL}} /* key */
+};
+
+
+/*
+** hash for lua_Numbers
+*/
+static Node *hashnum (const Table *t, lua_Number n) {
+ unsigned int a[numints];
+ int i;
+ n += 1; /* normalize number (avoid -0) */
+ lua_assert(sizeof(a) <= sizeof(n));
+ memcpy(a, &n, sizeof(a));
+ for (i = 1; i < numints; i++) a[0] += a[i];
+ return hashmod(t, a[0]);
+}
+
+
+
+/*
+** returns the `main' position of an element in a table (that is, the index
+** of its hash value)
+*/
+static Node *mainposition (const Table *t, const TValue *key) {
+ switch (ttype(key)) {
+ case LUA_TNUMBER:
+ return hashnum(t, nvalue(key));
+ case LUA_TSTRING:
+ return hashstr(t, rawtsvalue(key));
+ case LUA_TBOOLEAN:
+ return hashboolean(t, bvalue(key));
+ case LUA_TLIGHTUSERDATA:
+ return hashpointer(t, pvalue(key));
+ default:
+ return hashpointer(t, gcvalue(key));
+ }
+}
+
+
+/*
+** returns the index for `key' if `key' is an appropriate key to live in
+** the array part of the table, -1 otherwise.
+*/
+static int arrayindex (const TValue *key) {
+ if (ttisnumber(key)) {
+ lua_Number n = nvalue(key);
+ int k;
+ lua_number2int(k, n);
+ if (luai_numeq(cast_num(k), n))
+ return k;
+ }
+ return -1; /* `key' did not match some condition */
+}
+
+
+/*
+** returns the index of a `key' for table traversals. First goes all
+** elements in the array part, then elements in the hash part. The
+** beginning of a traversal is signalled by -1.
+*/
+static int findindex (lua_State *L, Table *t, StkId key) {
+ int i;
+ if (ttisnil(key)) return -1; /* first iteration */
+ i = arrayindex(key);
+ if (0 < i && i <= t->sizearray) /* is `key' inside array part? */
+ return i-1; /* yes; that's the index (corrected to C) */
+ else {
+ Node *n = mainposition(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ /* key may be dead already, but it is ok to use it in `next' */
+ if (luaO_rawequalObj(key2tval(n), key) ||
+ (ttype(gkey(n)) == LUA_TDEADKEY && iscollectable(key) &&
+ gcvalue(gkey(n)) == gcvalue(key))) {
+ i = cast_int(n - gnode(t, 0)); /* key index in hash table */
+ /* hash elements are numbered after array ones */
+ return i + t->sizearray;
+ }
+ else n = gnext(n);
+ } while (n);
+ luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */
+ return 0; /* to avoid warnings */
+ }
+}
+
+
+int luaH_next (lua_State *L, Table *t, StkId key) {
+ int i = findindex(L, t, key); /* find original element */
+ for (i++; i < t->sizearray; i++) { /* try first array part */
+ if (!ttisnil(&t->array[i])) { /* a non-nil value? */
+ setnvalue(key, cast_num(i+1));
+ setobj2s(L, key+1, &t->array[i]);
+ return 1;
+ }
+ }
+ for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */
+ if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? */
+ setobj2s(L, key, key2tval(gnode(t, i)));
+ setobj2s(L, key+1, gval(gnode(t, i)));
+ return 1;
+ }
+ }
+ return 0; /* no more elements */
+}
+
+
+/*
+** {=============================================================
+** Rehash
+** ==============================================================
+*/
+
+
+static int computesizes (int nums[], int *narray) {
+ int i;
+ int twotoi; /* 2^i */
+ int a = 0; /* number of elements smaller than 2^i */
+ int na = 0; /* number of elements to go to array part */
+ int n = 0; /* optimal size for array part */
+ for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) {
+ if (nums[i] > 0) {
+ a += nums[i];
+ if (a > twotoi/2) { /* more than half elements present? */
+ n = twotoi; /* optimal size (till now) */
+ na = a; /* all elements smaller than n will go to array part */
+ }
+ }
+ if (a == *narray) break; /* all elements already counted */
+ }
+ *narray = n;
+ lua_assert(*narray/2 <= na && na <= *narray);
+ return na;
+}
+
+
+static int countint (const TValue *key, int *nums) {
+ int k = arrayindex(key);
+ if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */
+ nums[ceillog2(k)]++; /* count as such */
+ return 1;
+ }
+ else
+ return 0;
+}
+
+
+static int numusearray (const Table *t, int *nums) {
+ int lg;
+ int ttlg; /* 2^lg */
+ int ause = 0; /* summation of `nums' */
+ int i = 1; /* count to traverse all array keys */
+ for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */
+ int lc = 0; /* counter */
+ int lim = ttlg;
+ if (lim > t->sizearray) {
+ lim = t->sizearray; /* adjust upper limit */
+ if (i > lim)
+ break; /* no more elements to count */
+ }
+ /* count elements in range (2^(lg-1), 2^lg] */
+ for (; i <= lim; i++) {
+ if (!ttisnil(&t->array[i-1]))
+ lc++;
+ }
+ nums[lg] += lc;
+ ause += lc;
+ }
+ return ause;
+}
+
+
+static int numusehash (const Table *t, int *nums, int *pnasize) {
+ int totaluse = 0; /* total number of elements */
+ int ause = 0; /* summation of `nums' */
+ int i = sizenode(t);
+ while (i--) {
+ Node *n = &t->node[i];
+ if (!ttisnil(gval(n))) {
+ ause += countint(key2tval(n), nums);
+ totaluse++;
+ }
+ }
+ *pnasize += ause;
+ return totaluse;
+}
+
+
+static void setarrayvector (lua_State *L, Table *t, int size) {
+ int i;
+ luaM_reallocvector(L, t->array, t->sizearray, size, TValue);
+ for (i=t->sizearray; i<size; i++)
+ setnilvalue(&t->array[i]);
+ t->sizearray = size;
+}
+
+
+static void setnodevector (lua_State *L, Table *t, int size) {
+ int lsize;
+ if (size == 0) { /* no elements to hash part? */
+ t->node = cast(Node *, dummynode); /* use common `dummynode' */
+ lsize = 0;
+ }
+ else {
+ int i;
+ lsize = ceillog2(size);
+ if (lsize > MAXBITS)
+ luaG_runerror(L, "table overflow");
+ size = twoto(lsize);
+ t->node = luaM_newvector(L, size, Node);
+ for (i=0; i<size; i++) {
+ Node *n = gnode(t, i);
+ gnext(n) = NULL;
+ setnilvalue(gkey(n));
+ setnilvalue(gval(n));
+ }
+ }
+ t->lsizenode = cast_byte(lsize);
+ t->lastfree = gnode(t, size); /* all positions are free */
+}
+
+
+static void resize (lua_State *L, Table *t, int nasize, int nhsize) {
+ int i;
+ int oldasize = t->sizearray;
+ int oldhsize = t->lsizenode;
+ Node *nold = t->node; /* save old hash ... */
+ if (nasize > oldasize) /* array part must grow? */
+ setarrayvector(L, t, nasize);
+ /* create new hash part with appropriate size */
+ setnodevector(L, t, nhsize);
+ if (nasize < oldasize) { /* array part must shrink? */
+ t->sizearray = nasize;
+ /* re-insert elements from vanishing slice */
+ for (i=nasize; i<oldasize; i++) {
+ if (!ttisnil(&t->array[i]))
+ setobjt2t(L, luaH_setnum(L, t, i+1), &t->array[i]);
+ }
+ /* shrink array */
+ luaM_reallocvector(L, t->array, oldasize, nasize, TValue);
+ }
+ /* re-insert elements from hash part */
+ for (i = twoto(oldhsize) - 1; i >= 0; i--) {
+ Node *old = nold+i;
+ if (!ttisnil(gval(old)))
+ setobjt2t(L, luaH_set(L, t, key2tval(old)), gval(old));
+ }
+ if (nold != dummynode)
+ luaM_freearray(L, nold, twoto(oldhsize), Node); /* free old array */
+}
+
+
+void luaH_resizearray (lua_State *L, Table *t, int nasize) {
+ int nsize = (t->node == dummynode) ? 0 : sizenode(t);
+ resize(L, t, nasize, nsize);
+}
+
+
+static void rehash (lua_State *L, Table *t, const TValue *ek) {
+ int nasize, na;
+ int nums[MAXBITS+1]; /* nums[i] = number of keys between 2^(i-1) and 2^i */
+ int i;
+ int totaluse;
+ for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */
+ nasize = numusearray(t, nums); /* count keys in array part */
+ totaluse = nasize; /* all those keys are integer keys */
+ totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */
+ /* count extra key */
+ nasize += countint(ek, nums);
+ totaluse++;
+ /* compute new size for array part */
+ na = computesizes(nums, &nasize);
+ /* resize the table to new computed sizes */
+ resize(L, t, nasize, totaluse - na);
+}
+
+
+
+/*
+** }=============================================================
+*/
+
+
+Table *luaH_new (lua_State *L, int narray, int nhash) {
+ Table *t = luaM_new(L, Table);
+ luaC_link(L, obj2gco(t), LUA_TTABLE);
+ t->metatable = NULL;
+ t->flags = cast_byte(~0);
+ /* temporary values (kept only if some malloc fails) */
+ t->array = NULL;
+ t->sizearray = 0;
+ t->lsizenode = 0;
+ t->node = cast(Node *, dummynode);
+ setarrayvector(L, t, narray);
+ setnodevector(L, t, nhash);
+ return t;
+}
+
+
+void luaH_free (lua_State *L, Table *t) {
+ if (t->node != dummynode)
+ luaM_freearray(L, t->node, sizenode(t), Node);
+ luaM_freearray(L, t->array, t->sizearray, TValue);
+ luaM_free(L, t);
+}
+
+
+static Node *getfreepos (Table *t) {
+ while (t->lastfree-- > t->node) {
+ if (ttisnil(gkey(t->lastfree)))
+ return t->lastfree;
+ }
+ return NULL; /* could not find a free place */
+}
+
+
+
+/*
+** inserts a new key into a hash table; first, check whether key's main
+** position is free. If not, check whether colliding node is in its main
+** position or not: if it is not, move colliding node to an empty place and
+** put new key in its main position; otherwise (colliding node is in its main
+** position), new key goes to an empty position.
+*/
+static TValue *newkey (lua_State *L, Table *t, const TValue *key) {
+ Node *mp = mainposition(t, key);
+ if (!ttisnil(gval(mp)) || mp == dummynode) {
+ Node *othern;
+ Node *n = getfreepos(t); /* get a free place */
+ if (n == NULL) { /* cannot find a free place? */
+ rehash(L, t, key); /* grow table */
+ return luaH_set(L, t, key); /* re-insert key into grown table */
+ }
+ lua_assert(n != dummynode);
+ othern = mainposition(t, key2tval(mp));
+ if (othern != mp) { /* is colliding node out of its main position? */
+ /* yes; move colliding node into free position */
+ while (gnext(othern) != mp) othern = gnext(othern); /* find previous */
+ gnext(othern) = n; /* redo the chain with `n' in place of `mp' */
+ *n = *mp; /* copy colliding node into free pos. (mp->next also goes) */
+ gnext(mp) = NULL; /* now `mp' is free */
+ setnilvalue(gval(mp));
+ }
+ else { /* colliding node is in its own main position */
+ /* new node will go into free position */
+ gnext(n) = gnext(mp); /* chain new position */
+ gnext(mp) = n;
+ mp = n;
+ }
+ }
+ gkey(mp)->value = key->value; gkey(mp)->tt = key->tt;
+ luaC_barriert(L, t, key);
+ lua_assert(ttisnil(gval(mp)));
+ return gval(mp);
+}
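+
+/*
+** Editor's walk-through of newkey (illustration only): if key B hashes to
+** a node already holding key A, there are two cases.  When A is merely
+** chained there (its own main position is elsewhere), A is moved to a free
+** node and B takes its main position.  When A really belongs there, B is
+** stored in a free node and chained right after A.  Either way the
+** invariant stated at the top of this file is preserved.
+*/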
+
+
+/*
+** search function for integers
+*/
+const TValue *luaH_getnum (Table *t, int key) {
+ /* (1 <= key && key <= t->sizearray) */
+ if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray))
+ return &t->array[key-1];
+ else {
+ lua_Number nk = cast_num(key);
+ Node *n = hashnum(t, nk);
+ do { /* check whether `key' is somewhere in the chain */
+ if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+ }
+}
+
+
+/*
+** search function for strings
+*/
+const TValue *luaH_getstr (Table *t, TString *key) {
+ Node *n = hashstr(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ if (ttisstring(gkey(n)) && rawtsvalue(gkey(n)) == key)
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+}
+
+
+/*
+** main search function
+*/
+const TValue *luaH_get (Table *t, const TValue *key) {
+ switch (ttype(key)) {
+ case LUA_TNIL: return luaO_nilobject;
+ case LUA_TSTRING: return luaH_getstr(t, rawtsvalue(key));
+ case LUA_TNUMBER: {
+ int k;
+ lua_Number n = nvalue(key);
+ lua_number2int(k, n);
+ if (luai_numeq(cast_num(k), nvalue(key))) /* index is int? */
+ return luaH_getnum(t, k); /* use specialized version */
+ /* else go through */
+ }
+ default: {
+ Node *n = mainposition(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ if (luaO_rawequalObj(key2tval(n), key))
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+ }
+ }
+}
+
+
+TValue *luaH_set (lua_State *L, Table *t, const TValue *key) {
+ const TValue *p = luaH_get(t, key);
+ t->flags = 0;
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ if (ttisnil(key)) luaG_runerror(L, "table index is nil");
+ else if (ttisnumber(key) && luai_numisnan(nvalue(key)))
+ luaG_runerror(L, "table index is NaN");
+ return newkey(L, t, key);
+ }
+}
+
+
+TValue *luaH_setnum (lua_State *L, Table *t, int key) {
+ const TValue *p = luaH_getnum(t, key);
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ TValue k;
+ setnvalue(&k, cast_num(key));
+ return newkey(L, t, &k);
+ }
+}
+
+
+TValue *luaH_setstr (lua_State *L, Table *t, TString *key) {
+ const TValue *p = luaH_getstr(t, key);
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ TValue k;
+ setsvalue(L, &k, key);
+ return newkey(L, t, &k);
+ }
+}
+
+
+static int unbound_search (Table *t, unsigned int j) {
+ unsigned int i = j; /* i is zero or a present index */
+ j++;
+ /* find `i' and `j' such that i is present and j is not */
+ while (!ttisnil(luaH_getnum(t, j))) {
+ i = j;
+ j *= 2;
+ if (j > cast(unsigned int, MAX_INT)) { /* overflow? */
+ /* table was built with bad purposes: resort to linear search */
+ i = 1;
+ while (!ttisnil(luaH_getnum(t, i))) i++;
+ return i - 1;
+ }
+ }
+ /* now do a binary search between them */
+ while (j - i > 1) {
+ unsigned int m = (i+j)/2;
+ if (ttisnil(luaH_getnum(t, m))) j = m;
+ else i = m;
+ }
+ return i;
+}
+
+
+/*
+** Try to find a boundary in table `t'. A `boundary' is an integer index
+** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
+*/
+int luaH_getn (Table *t) {
+ unsigned int j = t->sizearray;
+ if (j > 0 && ttisnil(&t->array[j - 1])) {
+ /* there is a boundary in the array part: (binary) search for it */
+ unsigned int i = 0;
+ while (j - i > 1) {
+ unsigned int m = (i+j)/2;
+ if (ttisnil(&t->array[m - 1])) j = m;
+ else i = m;
+ }
+ return i;
+ }
+ /* else must find a boundary in hash part */
+ else if (t->node == dummynode) /* hash part is empty? */
+ return j; /* that is easy... */
+ else return unbound_search(t, j);
+}
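+
+/*
+** Editor's note (illustration only): with holes the boundary is not unique.
+** For t = {1, 2, 3, nil, 5} both 3 and 5 satisfy the definition above, so
+** the length operator may legitimately report either value.
+*/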
+
+
+
+#if defined(LUA_DEBUG)
+
+Node *luaH_mainposition (const Table *t, const TValue *key) {
+ return mainposition(t, key);
+}
+
+int luaH_isdummy (Node *n) { return n == dummynode; }
+
+#endif
diff --git a/deps/lua/src/ltable.h b/deps/lua/src/ltable.h
new file mode 100644
index 0000000000000000000000000000000000000000..09193cdbe048c9a19b8277f9c2bfae4af98591cf
--- /dev/null
+++ b/deps/lua/src/ltable.h
@@ -0,0 +1,40 @@
+/*
+** $Id: ltable.h,v 2.10 2006/01/10 13:13:06 roberto Exp $
+** Lua tables (hash)
+** See Copyright Notice in lua.h
+*/
+
+#ifndef ltable_h
+#define ltable_h
+
+#include "lobject.h"
+
+
+#define gnode(t,i) (&(t)->node[i])
+#define gkey(n) (&(n)->i_key.nk)
+#define gval(n) (&(n)->i_val)
+#define gnext(n) ((n)->i_key.nk.next)
+
+#define key2tval(n) (&(n)->i_key.tvk)
+
+
+LUAI_FUNC const TValue *luaH_getnum (Table *t, int key);
+LUAI_FUNC TValue *luaH_setnum (lua_State *L, Table *t, int key);
+LUAI_FUNC const TValue *luaH_getstr (Table *t, TString *key);
+LUAI_FUNC TValue *luaH_setstr (lua_State *L, Table *t, TString *key);
+LUAI_FUNC const TValue *luaH_get (Table *t, const TValue *key);
+LUAI_FUNC TValue *luaH_set (lua_State *L, Table *t, const TValue *key);
+LUAI_FUNC Table *luaH_new (lua_State *L, int narray, int lnhash);
+LUAI_FUNC void luaH_resizearray (lua_State *L, Table *t, int nasize);
+LUAI_FUNC void luaH_free (lua_State *L, Table *t);
+LUAI_FUNC int luaH_next (lua_State *L, Table *t, StkId key);
+LUAI_FUNC int luaH_getn (Table *t);
+
+
+#if defined(LUA_DEBUG)
+LUAI_FUNC Node *luaH_mainposition (const Table *t, const TValue *key);
+LUAI_FUNC int luaH_isdummy (Node *n);
+#endif
+
+
+#endif
diff --git a/deps/lua/src/ltablib.c b/deps/lua/src/ltablib.c
new file mode 100644
index 0000000000000000000000000000000000000000..453b23b378bc5a215905cc974a9d981827b989a8
--- /dev/null
+++ b/deps/lua/src/ltablib.c
@@ -0,0 +1,278 @@
+/*
+** $Id: ltablib.c,v 1.38 2005/10/23 17:38:15 roberto Exp $
+** Library for Table Manipulation
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stddef.h>
+
+#define ltablib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+#define aux_getn(L,n) (luaL_checktype(L, n, LUA_TTABLE), luaL_getn(L, n))
+
+
+static int foreachi (lua_State *L) {
+ int i;
+ int n = aux_getn(L, 1);
+ luaL_checktype(L, 2, LUA_TFUNCTION);
+ for (i=1; i <= n; i++) {
+ lua_pushvalue(L, 2); /* function */
+ lua_pushinteger(L, i); /* 1st argument */
+ lua_rawgeti(L, 1, i); /* 2nd argument */
+ lua_call(L, 2, 1);
+ if (!lua_isnil(L, -1))
+ return 1;
+ lua_pop(L, 1); /* remove nil result */
+ }
+ return 0;
+}
+
+
+static int foreach (lua_State *L) {
+ luaL_checktype(L, 1, LUA_TTABLE);
+ luaL_checktype(L, 2, LUA_TFUNCTION);
+ lua_pushnil(L); /* first key */
+ while (lua_next(L, 1)) {
+ lua_pushvalue(L, 2); /* function */
+ lua_pushvalue(L, -3); /* key */
+ lua_pushvalue(L, -3); /* value */
+ lua_call(L, 2, 1);
+ if (!lua_isnil(L, -1))
+ return 1;
+ lua_pop(L, 2); /* remove value and result */
+ }
+ return 0;
+}
+
+
+static int maxn (lua_State *L) {
+ lua_Number max = 0;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_pushnil(L); /* first key */
+ while (lua_next(L, 1)) {
+ lua_pop(L, 1); /* remove value */
+ if (lua_type(L, -1) == LUA_TNUMBER) {
+ lua_Number v = lua_tonumber(L, -1);
+ if (v > max) max = v;
+ }
+ }
+ lua_pushnumber(L, max);
+ return 1;
+}
+
+
+static int getn (lua_State *L) {
+ lua_pushinteger(L, aux_getn(L, 1));
+ return 1;
+}
+
+
+static int setn (lua_State *L) {
+ luaL_checktype(L, 1, LUA_TTABLE);
+#ifndef luaL_setn
+ luaL_setn(L, 1, luaL_checkint(L, 2));
+#else
+ luaL_error(L, LUA_QL("setn") " is obsolete");
+#endif
+ lua_pushvalue(L, 1);
+ return 1;
+}
+
+
+static int tinsert (lua_State *L) {
+ int e = aux_getn(L, 1) + 1; /* first empty element */
+ int pos; /* where to insert new element */
+ switch (lua_gettop(L)) {
+ case 2: { /* called with only 2 arguments */
+ pos = e; /* insert new element at the end */
+ break;
+ }
+ case 3: {
+ int i;
+ pos = luaL_checkint(L, 2); /* 2nd argument is the position */
+ if (pos > e) e = pos; /* `grow' array if necessary */
+ for (i = e; i > pos; i--) { /* move up elements */
+ lua_rawgeti(L, 1, i-1);
+ lua_rawseti(L, 1, i); /* t[i] = t[i-1] */
+ }
+ break;
+ }
+ default: {
+ return luaL_error(L, "wrong number of arguments to " LUA_QL("insert"));
+ }
+ }
+ luaL_setn(L, 1, e); /* new size */
+ lua_rawseti(L, 1, pos); /* t[pos] = v */
+ return 0;
+}
+
+
+static int tremove (lua_State *L) {
+ int e = aux_getn(L, 1);
+ int pos = luaL_optint(L, 2, e);
+ if (e == 0) return 0; /* table is `empty' */
+ luaL_setn(L, 1, e - 1); /* t.n = n-1 */
+ lua_rawgeti(L, 1, pos); /* result = t[pos] */
+ for ( ;pos<e; pos++) {
+ lua_rawgeti(L, 1, pos+1);
+ lua_rawseti(L, 1, pos); /* t[pos] = t[pos+1] */
+ }
+ lua_pushnil(L);
+ lua_rawseti(L, 1, e); /* t[e] = nil */
+ return 1;
+}
+ /* repeat ++i until a[i] >= P */
+ while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+ if (i>u) luaL_error(L, "invalid order function for sorting");
+ lua_pop(L, 1); /* remove a[i] */
+ }
+ /* repeat --j until a[j] <= P */
+ while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+ if (j<l) luaL_error(L, "invalid order function for sorting");
+ lua_pop(L, 1); /* remove a[j] */
+ }
diff --git a/deps/lua/src/ltm.c b/deps/lua/src/ltm.c
new file mode 100644
--- /dev/null
+++ b/deps/lua/src/ltm.c
+/*
+** Tag methods
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define ltm_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lobject.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+
+
+
+const char *const luaT_typenames[] = {
+ "nil", "boolean", "userdata", "number",
+ "string", "table", "function", "userdata", "thread",
+ "proto", "upval"
+};
+
+
+void luaT_init (lua_State *L) {
+ static const char *const luaT_eventname[] = { /* ORDER TM */
+ "__index", "__newindex",
+ "__gc", "__mode", "__eq",
+ "__add", "__sub", "__mul", "__div", "__mod",
+ "__pow", "__unm", "__len", "__lt", "__le",
+ "__concat", "__call"
+ };
+ int i;
+ for (i=0; i<TM_N; i++) {
+ G(L)->tmname[i] = luaS_new(L, luaT_eventname[i]);
+ luaS_fix(G(L)->tmname[i]); /* never collect these names */
+ }
+}
+
+
+/*
+** function to be used with macro "fasttm": optimized for absence of
+** tag methods
+*/
+const TValue *luaT_gettm (Table *events, TMS event, TString *ename) {
+ const TValue *tm = luaH_getstr(events, ename);
+ lua_assert(event <= TM_EQ);
+ if (ttisnil(tm)) { /* no tag method? */
+ events->flags |= cast_byte(1u<<event); /* cache this fact */
+ return NULL;
+ }
+ else return tm;
+}
+
+
+const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, TMS event) {
+ Table *mt;
+ switch (ttype(o)) {
+ case LUA_TTABLE:
+ mt = hvalue(o)->metatable;
+ break;
+ case LUA_TUSERDATA:
+ mt = uvalue(o)->metatable;
+ break;
+ default:
+ mt = G(L)->mt[ttype(o)];
+ }
+ return (mt ? luaH_getstr(mt, G(L)->tmname[event]) : luaO_nilobject);
+}
+
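
luaT_gettmbyobj above is what the interpreter uses internally to locate a metamethod for an arbitrary value. From the public API, the closest equivalent is luaL_getmetafield; the sketch below (helper name has_tostring is ours) only illustrates that pattern and is not code from this patch.

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"

/* Report whether the value at `idx` has a __tostring metamethod.
   luaL_getmetafield pushes the field and returns nonzero when it exists. */
static int has_tostring (lua_State *L, int idx) {
  if (luaL_getmetafield(L, idx, "__tostring") == 0)
    return 0;              /* no metatable, or no such field */
  lua_pop(L, 1);           /* drop the metamethod we just pushed */
  return 1;
}

int main (void) {
  lua_State *L = luaL_newstate();
  lua_newtable(L);
  printf("plain table has __tostring: %d\n", has_tostring(L, -1));
  lua_close(L);
  return 0;
}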
diff --git a/deps/lua/src/ltm.h b/deps/lua/src/ltm.h
new file mode 100644
index 0000000000000000000000000000000000000000..866c79668d89673f350d37d0408b37bd94965448
--- /dev/null
+++ b/deps/lua/src/ltm.h
@@ -0,0 +1,54 @@
+/*
+** $Id: ltm.h,v 2.6 2005/06/06 13:30:25 roberto Exp $
+** Tag methods
+** See Copyright Notice in lua.h
+*/
+
+#ifndef ltm_h
+#define ltm_h
+
+
+#include "lobject.h"
+
+
+/*
+* WARNING: if you change the order of this enumeration,
+* grep "ORDER TM"
+*/
+typedef enum {
+ TM_INDEX,
+ TM_NEWINDEX,
+ TM_GC,
+ TM_MODE,
+ TM_EQ, /* last tag method with `fast' access */
+ TM_ADD,
+ TM_SUB,
+ TM_MUL,
+ TM_DIV,
+ TM_MOD,
+ TM_POW,
+ TM_UNM,
+ TM_LEN,
+ TM_LT,
+ TM_LE,
+ TM_CONCAT,
+ TM_CALL,
+ TM_N /* number of elements in the enum */
+} TMS;
+
+
+
+#define gfasttm(g,et,e) ((et) == NULL ? NULL : \
+ ((et)->flags & (1u<<(e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e]))
+
+#define fasttm(l,et,e) gfasttm(G(l), et, e)
+
+LUAI_DATA const char *const luaT_typenames[];
+
+
+LUAI_FUNC const TValue *luaT_gettm (Table *events, TMS event, TString *ename);
+LUAI_FUNC const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o,
+ TMS event);
+LUAI_FUNC void luaT_init (lua_State *L);
+
+#endif
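
gfasttm answers "no metamethod" without a hash lookup once the corresponding bit in the metatable's flags byte has been set, which is why only the events up to TM_EQ qualify for the cache. A rough core-internal sketch of using fasttm follows; it assumes compilation inside the core (lstate.h available) and the helper name has_index_event is ours.

#define LUA_CORE
#include "lua.h"
#include "lobject.h"
#include "lstate.h"
#include "ltm.h"

/* Does metatable `mt` (which may be NULL) define __index?  fasttm returns
   NULL both when there is no metatable and when the "absent" bit for
   TM_INDEX is already cached in mt->flags. */
static int has_index_event (lua_State *L, Table *mt) {
  const TValue *tm = fasttm(L, mt, TM_INDEX);
  return tm != NULL;
}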
diff --git a/deps/lua/src/lua.c b/deps/lua/src/lua.c
new file mode 100644
index 0000000000000000000000000000000000000000..6df527db30fbd04bd9597cb610622a7e17a5002c
--- /dev/null
+++ b/deps/lua/src/lua.c
@@ -0,0 +1,377 @@
+/*
+** $Id: lua.c,v 1.157 2005/12/29 16:23:32 roberto Exp $
+** Lua stand-alone interpreter
+** See Copyright Notice in lua.h
+*/
+
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lua_c
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+
+static lua_State *globalL = NULL;
+
+static const char *progname = LUA_PROGNAME;
+
+
+
+static void lstop (lua_State *L, lua_Debug *ar) {
+ (void)ar; /* unused arg. */
+ lua_sethook(L, NULL, 0, 0);
+ luaL_error(L, "interrupted!");
+}
+
+
+static void laction (int i) {
+ signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
+ terminate process (default action) */
+ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
+}
+
+
+static void print_usage (void) {
+ fprintf(stderr,
+ "usage: %s [options] [script [args]].\n"
+ "Available options are:\n"
+ " -e stat execute string " LUA_QL("stat") "\n"
+ " -l name require library " LUA_QL("name") "\n"
+ " -i enter interactive mode after executing " LUA_QL("script") "\n"
+ " -v show version information\n"
+ " -- stop handling options\n"
+ " - execute stdin and stop handling options\n"
+ ,
+ progname);
+ fflush(stderr);
+}
+
+
+static void l_message (const char *pname, const char *msg) {
+ if (pname) fprintf(stderr, "%s: ", pname);
+ fprintf(stderr, "%s\n", msg);
+ fflush(stderr);
+}
+
+
+static int report (lua_State *L, int status) {
+ if (status && !lua_isnil(L, -1)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg == NULL) msg = "(error object is not a string)";
+ l_message(progname, msg);
+ lua_pop(L, 1);
+ }
+ return status;
+}
+
+
+static int traceback (lua_State *L) {
+ lua_getfield(L, LUA_GLOBALSINDEX, "debug");
+ if (!lua_istable(L, -1)) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ lua_getfield(L, -1, "traceback");
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2);
+ return 1;
+ }
+ lua_pushvalue(L, 1); /* pass error message */
+ lua_pushinteger(L, 2); /* skip this function and traceback */
+ lua_call(L, 2, 1); /* call debug.traceback */
+ return 1;
+}
+
+
+static int docall (lua_State *L, int narg, int clear) {
+ int status;
+ int base = lua_gettop(L) - narg; /* function index */
+ lua_pushcfunction(L, traceback); /* push traceback function */
+ lua_insert(L, base); /* put it under chunk and args */
+ signal(SIGINT, laction);
+ status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
+ signal(SIGINT, SIG_DFL);
+ lua_remove(L, base); /* remove traceback function */
+ /* force a complete garbage collection in case of errors */
+ if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0);
+ return status;
+}
+
+
+static void print_version (void) {
+ l_message(NULL, LUA_VERSION " " LUA_COPYRIGHT);
+}
+
+
+static int getargs (lua_State *L, char **argv, int n) {
+ int narg;
+ int i;
+ int argc = 0;
+ while (argv[argc]) argc++; /* count total number of arguments */
+ narg = argc - (n + 1); /* number of arguments to the script */
+ luaL_checkstack(L, narg + 3, "too many arguments to script");
+ for (i=n+1; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ lua_createtable(L, narg, n + 1);
+ for (i=0; i < argc; i++) {
+ lua_pushstring(L, argv[i]);
+ lua_rawseti(L, -2, i - n);
+ }
+ return narg;
+}
+
+
+static int dofile (lua_State *L, const char *name) {
+ int status = luaL_loadfile(L, name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+
+static int dostring (lua_State *L, const char *s, const char *name) {
+ int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+
+static int dolibrary (lua_State *L, const char *name) {
+ lua_getglobal(L, "require");
+ lua_pushstring(L, name);
+ return report(L, lua_pcall(L, 1, 0, 0));
+}
+
+
+static const char *get_prompt (lua_State *L, int firstline) {
+ const char *p;
+ lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
+ p = lua_tostring(L, -1);
+ if (p == NULL) p = (firstline ? LUA_PROMPT : LUA_PROMPT2);
+ lua_pop(L, 1); /* remove global */
+ return p;
+}
+
+
+static int incomplete (lua_State *L, int status) {
+ if (status == LUA_ERRSYNTAX) {
+ size_t lmsg;
+ const char *msg = lua_tolstring(L, -1, &lmsg);
+ const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
+ if (strstr(msg, LUA_QL("<eof>")) == tp) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ }
+ return 0; /* else... */
+}
+
+
+static int pushline (lua_State *L, int firstline) {
+ char buffer[LUA_MAXINPUT];
+ char *b = buffer;
+ size_t l;
+ const char *prmt = get_prompt(L, firstline);
+ if (lua_readline(L, b, prmt) == 0)
+ return 0; /* no input */
+ l = strlen(b);
+ if (l > 0 && b[l-1] == '\n') /* line ends with newline? */
+ b[l-1] = '\0'; /* remove it */
+ if (firstline && b[0] == '=') /* first line starts with `=' ? */
+ lua_pushfstring(L, "return %s", b+1); /* change it to `return' */
+ else
+ lua_pushstring(L, b);
+ lua_freeline(L, b);
+ return 1;
+}
+
+
+static int loadline (lua_State *L) {
+ int status;
+ lua_settop(L, 0);
+ if (!pushline(L, 1))
+ return -1; /* no input */
+ for (;;) { /* repeat until gets a complete line */
+ status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
+ if (!incomplete(L, status)) break; /* cannot try to add lines? */
+ if (!pushline(L, 0)) /* no more input? */
+ return -1;
+ lua_pushliteral(L, "\n"); /* add a new line... */
+ lua_insert(L, -2); /* ...between the two lines */
+ lua_concat(L, 3); /* join them */
+ }
+ lua_saveline(L, 1);
+ lua_remove(L, 1); /* remove line */
+ return status;
+}
+
+
+static void dotty (lua_State *L) {
+ int status;
+ const char *oldprogname = progname;
+ progname = NULL;
+ while ((status = loadline(L)) != -1) {
+ if (status == 0) status = docall(L, 0, 0);
+ report(L, status);
+ if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */
+ lua_getglobal(L, "print");
+ lua_insert(L, 1);
+ if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
+ l_message(progname, lua_pushfstring(L,
+ "error calling " LUA_QL("print") " (%s)",
+ lua_tostring(L, -1)));
+ }
+ }
+ lua_settop(L, 0); /* clear stack */
+ fputs("\n", stdout);
+ fflush(stdout);
+ progname = oldprogname;
+}
+
+
+static int handle_script (lua_State *L, char **argv, int n) {
+ int status;
+ const char *fname;
+ int narg = getargs(L, argv, n); /* collect arguments */
+ lua_setglobal(L, "arg");
+ fname = argv[n];
+ if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
+ fname = NULL; /* stdin */
+ status = luaL_loadfile(L, fname);
+ lua_insert(L, -(narg+1));
+ if (status == 0)
+ status = docall(L, narg, 0);
+ else
+ lua_pop(L, narg);
+ return report(L, status);
+}
+
+
+static int collectargs (char **argv, int *pi, int *pv, int *pe) {
+ int i;
+ for (i = 1; argv[i] != NULL; i++) {
+ if (argv[i][0] != '-') /* not an option? */
+ return i;
+ switch (argv[i][1]) { /* option */
+ case '-': return (argv[i+1] != NULL ? i+1 : 0);
+ case '\0': return i;
+ case 'i': *pi = 1; /* go through */
+ case 'v': *pv = 1; break;
+ case 'e': *pe = 1; /* go through */
+ case 'l':
+ if (argv[i][2] == '\0') {
+ i++;
+ if (argv[i] == NULL) return -1;
+ }
+ break;
+ default: return -1; /* invalid option */
+ }
+ }
+ return 0;
+}
+
+
+static int runargs (lua_State *L, char **argv, int n) {
+ int i;
+ for (i = 1; i < n; i++) {
+ if (argv[i] == NULL) continue;
+ lua_assert(argv[i][0] == '-');
+ switch (argv[i][1]) { /* option */
+ case 'e': {
+ const char *chunk = argv[i] + 2;
+ if (*chunk == '\0') chunk = argv[++i];
+ lua_assert(chunk != NULL);
+ if (dostring(L, chunk, "=(command line)") != 0)
+ return 1;
+ break;
+ }
+ case 'l': {
+ const char *filename = argv[i] + 2;
+ if (*filename == '\0') filename = argv[++i];
+ lua_assert(filename != NULL);
+ if (dolibrary(L, filename))
+ return 1; /* stop if file fails */
+ break;
+ }
+ default: break;
+ }
+ }
+ return 0;
+}
+
+
+static int handle_luainit (lua_State *L) {
+ const char *init = getenv("LUA_INIT");
+ if (init == NULL) return 0; /* status OK */
+ else if (init[0] == '@')
+ return dofile(L, init+1);
+ else
+ return dostring(L, init, "=LUA_INIT");
+}
+
+
+struct Smain {
+ int argc;
+ char **argv;
+ int status;
+};
+
+
+static int pmain (lua_State *L) {
+ struct Smain *s = (struct Smain *)lua_touserdata(L, 1);
+ char **argv = s->argv;
+ int script;
+ int has_i = 0, has_v = 0, has_e = 0;
+ globalL = L;
+ if (argv[0] && argv[0][0]) progname = argv[0];
+ lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */
+ luaL_openlibs(L); /* open libraries */
+ lua_gc(L, LUA_GCRESTART, 0);
+ s->status = handle_luainit(L);
+ if (s->status != 0) return 0;
+ script = collectargs(argv, &has_i, &has_v, &has_e);
+ if (script < 0) { /* invalid args? */
+ print_usage();
+ s->status = 1;
+ return 0;
+ }
+ if (has_v) print_version();
+ s->status = runargs(L, argv, (script > 0) ? script : s->argc);
+ if (s->status != 0) return 0;
+ if (script)
+ s->status = handle_script(L, argv, script);
+ if (s->status != 0) return 0;
+ if (has_i)
+ dotty(L);
+ else if (script == 0 && !has_e && !has_v) {
+ if (lua_stdin_is_tty()) {
+ print_version();
+ dotty(L);
+ }
+ else dofile(L, NULL); /* executes stdin as a file */
+ }
+ return 0;
+}
+
+
+int main (int argc, char **argv) {
+ int status;
+ struct Smain s;
+ lua_State *L = lua_open(); /* create state */
+ if (L == NULL) {
+ l_message(argv[0], "cannot create state: not enough memory");
+ return EXIT_FAILURE;
+ }
+ s.argc = argc;
+ s.argv = argv;
+ status = lua_cpcall(L, &pmain, &s);
+ report(L, status);
+ lua_close(L);
+ return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
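
The docall/traceback pair above is the standard way to get stack traces out of lua_pcall: push a message handler below the chunk and pass its index as errfunc. A minimal embedding sketch of the same pattern follows; the file name hello.lua is a placeholder and msghandler is our name, not part of lua.c.

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

/* Message handler: append a traceback to the error, as lua.c does. */
static int msghandler (lua_State *L) {
  lua_getfield(L, LUA_GLOBALSINDEX, "debug");
  lua_getfield(L, -1, "traceback");
  lua_pushvalue(L, 1);       /* original error value */
  lua_pushinteger(L, 2);     /* skip this handler in the trace */
  lua_call(L, 2, 1);
  return 1;
}

int main (void) {
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_pushcfunction(L, msghandler);   /* handler sits at stack index 1 */
  if (luaL_loadfile(L, "hello.lua") != 0 || lua_pcall(L, 0, 0, 1) != 0)
    fprintf(stderr, "%s\n", lua_tostring(L, -1));
  lua_close(L);
  return 0;
}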
diff --git a/deps/lua/src/lua.h b/deps/lua/src/lua.h
new file mode 100644
index 0000000000000000000000000000000000000000..881f834555945d01b101984eb2eea22881258dee
--- /dev/null
+++ b/deps/lua/src/lua.h
@@ -0,0 +1,384 @@
+/*
+** $Id: lua.h,v 1.216 2006/01/10 12:50:13 roberto Exp $
+** Lua - An Extensible Extension Language
+** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
+** See Copyright Notice at the end of this file
+*/
+
+
+#ifndef lua_h
+#define lua_h
+
+#include <stdarg.h>
+#include <stddef.h>
+
+
+#include "luaconf.h"
+
+
+#define LUA_VERSION "Lua 5.1"
+#define LUA_VERSION_NUM 501
+#define LUA_COPYRIGHT "Copyright (C) 1994-2006 Lua.org, PUC-Rio"
+#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
+
+
+/* mark for precompiled code (`Lua') */
+#define LUA_SIGNATURE "\033Lua"
+
+/* option for multiple returns in `lua_pcall' and `lua_call' */
+#define LUA_MULTRET (-1)
+
+
+/*
+** pseudo-indices
+*/
+#define LUA_REGISTRYINDEX (-10000)
+#define LUA_ENVIRONINDEX (-10001)
+#define LUA_GLOBALSINDEX (-10002)
+#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
+
+
+/* thread status; 0 is OK */
+#define LUA_YIELD 1
+#define LUA_ERRRUN 2
+#define LUA_ERRSYNTAX 3
+#define LUA_ERRMEM 4
+#define LUA_ERRERR 5
+
+
+typedef struct lua_State lua_State;
+
+typedef int (*lua_CFunction) (lua_State *L);
+
+
+/*
+** functions that read/write blocks when loading/dumping Lua chunks
+*/
+typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
+
+typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
+
+
+/*
+** prototype for memory-allocation functions
+*/
+typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
+
+
+/*
+** basic types
+*/
+#define LUA_TNONE (-1)
+
+#define LUA_TNIL 0
+#define LUA_TBOOLEAN 1
+#define LUA_TLIGHTUSERDATA 2
+#define LUA_TNUMBER 3
+#define LUA_TSTRING 4
+#define LUA_TTABLE 5
+#define LUA_TFUNCTION 6
+#define LUA_TUSERDATA 7
+#define LUA_TTHREAD 8
+
+
+
+/* minimum Lua stack available to a C function */
+#define LUA_MINSTACK 20
+
+
+/*
+** generic extra include file
+*/
+#if defined(LUA_USER_H)
+#include LUA_USER_H
+#endif
+
+
+/* type of numbers in Lua */
+typedef LUA_NUMBER lua_Number;
+
+
+/* type for integer functions */
+typedef LUA_INTEGER lua_Integer;
+
+
+
+/*
+** state manipulation
+*/
+LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
+LUA_API void (lua_close) (lua_State *L);
+LUA_API lua_State *(lua_newthread) (lua_State *L);
+
+LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
+
+
+/*
+** basic stack manipulation
+*/
+LUA_API int (lua_gettop) (lua_State *L);
+LUA_API void (lua_settop) (lua_State *L, int idx);
+LUA_API void (lua_pushvalue) (lua_State *L, int idx);
+LUA_API void (lua_remove) (lua_State *L, int idx);
+LUA_API void (lua_insert) (lua_State *L, int idx);
+LUA_API void (lua_replace) (lua_State *L, int idx);
+LUA_API int (lua_checkstack) (lua_State *L, int sz);
+
+LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
+
+
+/*
+** access functions (stack -> C)
+*/
+
+LUA_API int (lua_isnumber) (lua_State *L, int idx);
+LUA_API int (lua_isstring) (lua_State *L, int idx);
+LUA_API int (lua_iscfunction) (lua_State *L, int idx);
+LUA_API int (lua_isuserdata) (lua_State *L, int idx);
+LUA_API int (lua_type) (lua_State *L, int idx);
+LUA_API const char *(lua_typename) (lua_State *L, int tp);
+
+LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
+
+LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
+LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
+LUA_API int (lua_toboolean) (lua_State *L, int idx);
+LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
+LUA_API size_t (lua_objlen) (lua_State *L, int idx);
+LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
+LUA_API void *(lua_touserdata) (lua_State *L, int idx);
+LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
+LUA_API const void *(lua_topointer) (lua_State *L, int idx);
+
+
+/*
+** push functions (C -> stack)
+*/
+LUA_API void (lua_pushnil) (lua_State *L);
+LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
+LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
+LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
+LUA_API void (lua_pushstring) (lua_State *L, const char *s);
+LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
+ va_list argp);
+LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
+LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
+LUA_API void (lua_pushboolean) (lua_State *L, int b);
+LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
+LUA_API int (lua_pushthread) (lua_State *L);
+
+
+/*
+** get functions (Lua -> stack)
+*/
+LUA_API void (lua_gettable) (lua_State *L, int idx);
+LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawget) (lua_State *L, int idx);
+LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
+LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
+LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
+LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
+LUA_API void (lua_getfenv) (lua_State *L, int idx);
+
+
+/*
+** set functions (stack -> Lua)
+*/
+LUA_API void (lua_settable) (lua_State *L, int idx);
+LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawset) (lua_State *L, int idx);
+LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
+LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
+LUA_API int (lua_setfenv) (lua_State *L, int idx);
+
+
+/*
+** `load' and `call' functions (load and run Lua code)
+*/
+LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
+LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
+LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
+LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname);
+
+LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
+
+
+/*
+** coroutine functions
+*/
+LUA_API int (lua_yield) (lua_State *L, int nresults);
+LUA_API int (lua_resume) (lua_State *L, int narg);
+LUA_API int (lua_status) (lua_State *L);
+
+/*
+** garbage-collection function and options
+*/
+
+#define LUA_GCSTOP 0
+#define LUA_GCRESTART 1
+#define LUA_GCCOLLECT 2
+#define LUA_GCCOUNT 3
+#define LUA_GCCOUNTB 4
+#define LUA_GCSTEP 5
+#define LUA_GCSETPAUSE 6
+#define LUA_GCSETSTEPMUL 7
+
+LUA_API int (lua_gc) (lua_State *L, int what, int data);
+
+
+/*
+** miscellaneous functions
+*/
+
+LUA_API int (lua_error) (lua_State *L);
+
+LUA_API int (lua_next) (lua_State *L, int idx);
+
+LUA_API void (lua_concat) (lua_State *L, int n);
+
+LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
+LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define lua_pop(L,n) lua_settop(L, -(n)-1)
+
+#define lua_newtable(L) lua_createtable(L, 0, 0)
+
+#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
+
+#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
+
+#define lua_strlen(L,i) lua_objlen(L, (i))
+
+#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
+#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
+#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
+#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
+#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
+#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
+#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
+#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
+
+#define lua_pushliteral(L, s) \
+ lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
+
+#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
+#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
+
+#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
+
+
+
+/*
+** compatibility macros and functions
+*/
+
+#define lua_open() luaL_newstate()
+
+#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
+
+#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
+
+#define lua_Chunkreader lua_Reader
+#define lua_Chunkwriter lua_Writer
+
+
+
+/*
+** {======================================================================
+** Debug API
+** =======================================================================
+*/
+
+
+/*
+** Event codes
+*/
+#define LUA_HOOKCALL 0
+#define LUA_HOOKRET 1
+#define LUA_HOOKLINE 2
+#define LUA_HOOKCOUNT 3
+#define LUA_HOOKTAILRET 4
+
+
+/*
+** Event masks
+*/
+#define LUA_MASKCALL (1 << LUA_HOOKCALL)
+#define LUA_MASKRET (1 << LUA_HOOKRET)
+#define LUA_MASKLINE (1 << LUA_HOOKLINE)
+#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
+
+typedef struct lua_Debug lua_Debug; /* activation record */
+
+
+/* Functions to be called by the debugger in specific events */
+typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+
+
+LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+
+LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
+LUA_API lua_Hook lua_gethook (lua_State *L);
+LUA_API int lua_gethookmask (lua_State *L);
+LUA_API int lua_gethookcount (lua_State *L);
+
+
+struct lua_Debug {
+ int event;
+ const char *name; /* (n) */
+ const char *namewhat; /* (n) `global', `local', `field', `method' */
+ const char *what; /* (S) `Lua', `C', `main', `tail' */
+ const char *source; /* (S) */
+ int currentline; /* (l) */
+ int nups; /* (u) number of upvalues */
+ int linedefined; /* (S) */
+ int lastlinedefined; /* (S) */
+ char short_src[LUA_IDSIZE]; /* (S) */
+ /* private part */
+ int i_ci; /* active function */
+};
+
+/* }====================================================================== */
+
+
+/******************************************************************************
+* Copyright (C) 1994-2006 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+
+#endif
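
Most of lua.h is a stack-based API: values are pushed, inspected by index, and popped. A small self-contained example using only declarations from this header (plus luaL_newstate from lauxlib) is sketched below for orientation; it is ours, not part of the patch.

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"

int main (void) {
  lua_State *L = luaL_newstate();
  lua_pushnumber(L, 3.5);             /* index 1 */
  lua_pushliteral(L, "hello");        /* index 2 (also -1) */
  printf("%d values on the stack: %g and %s\n",
         lua_gettop(L), lua_tonumber(L, 1), lua_tostring(L, 2));
  lua_pop(L, 2);                      /* back to an empty stack */
  lua_close(L);
  return 0;
}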
diff --git a/deps/lua/src/luac.c b/deps/lua/src/luac.c
new file mode 100644
index 0000000000000000000000000000000000000000..2dd76b76535f75e35271f465909307551afdf5be
--- /dev/null
+++ b/deps/lua/src/luac.c
@@ -0,0 +1,196 @@
+/*
+** $Id: luac.c,v 1.52 2005/11/11 14:03:13 lhf Exp $
+** Lua compiler (saves bytecodes to files; also list bytecodes)
+** See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define luac_c
+#define LUA_CORE
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "ldo.h"
+#include "lfunc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lstring.h"
+#include "lundump.h"
+
+#define PROGNAME "luac" /* default program name */
+#define OUTPUT PROGNAME ".out" /* default output file */
+
+static int listing=0; /* list bytecodes? */
+static int dumping=1; /* dump bytecodes? */
+static int stripping=0; /* strip debug information? */
+static char Output[]={ OUTPUT }; /* default output file name */
+static const char* output=Output; /* actual output file name */
+static const char* progname=PROGNAME; /* actual program name */
+
+static void fatal(const char* message)
+{
+ fprintf(stderr,"%s: %s\n",progname,message);
+ exit(EXIT_FAILURE);
+}
+
+static void cannot(const char* what)
+{
+ fprintf(stderr,"%s: cannot %s %s: %s\n",progname,what,output,strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+static void usage(const char* message)
+{
+ if (*message=='-')
+ fprintf(stderr,"%s: unrecognized option " LUA_QS "\n",progname,message);
+ else
+ fprintf(stderr,"%s: %s\n",progname,message);
+ fprintf(stderr,
+ "usage: %s [options] [filenames].\n"
+ "Available options are:\n"
+ " - process stdin\n"
+ " -l list\n"
+ " -o name output to file " LUA_QL("name") " (default is \"%s\")\n"
+ " -p parse only\n"
+ " -s strip debug information\n"
+ " -v show version information\n"
+ " -- stop handling options\n",
+ progname,Output);
+ exit(EXIT_FAILURE);
+}
+
+#define IS(s) (strcmp(argv[i],s)==0)
+
+static int doargs(int argc, char* argv[])
+{
+ int i;
+ if (argv[0]!=NULL && *argv[0]!=0) progname=argv[0];
+ for (i=1; i<argc; i++)
+
+#define toproto(L,i) (clvalue(L->top+(i))->l.p)
+
+static Proto* combine(lua_State* L, int n)
+{
+ if (n==1)
+ return toproto(L,-1);
+ else
+ {
+ int i,pc;
+ Proto* f=luaF_newproto(L);
+ setptvalue2s(L,L->top,f); incr_top(L);
+ f->source=luaS_newliteral(L,"=(" PROGNAME ")");
+ f->maxstacksize=1;
+ pc=2*n+1;
+ f->code=luaM_newvector(L,pc,Instruction);
+ f->sizecode=pc;
+ f->p=luaM_newvector(L,n,Proto*);
+ f->sizep=n;
+ pc=0;
+ for (i=0; i<n; i++)
+ {
+ f->p[i]=toproto(L,i-n-1);
+ f->code[pc++]=CREATE_ABx(OP_CLOSURE,0,i);
+ f->code[pc++]=CREATE_ABC(OP_CALL,0,1,1);
+ }
+ f->code[pc++]=CREATE_ABC(OP_RETURN,0,1,0);
+ return f;
+ }
+}
+
+static int writer(lua_State* L, const void* p, size_t size, void* u)
+{
+ UNUSED(L);
+ return (fwrite(p,size,1,(FILE*)u)!=1) && (size!=0);
+}
+
+struct Smain {
+ int argc;
+ char** argv;
+};
+
+static int pmain(lua_State* L)
+{
+ struct Smain* s = (struct Smain*)lua_touserdata(L, 1);
+ int argc=s->argc;
+ char** argv=s->argv;
+ Proto* f;
+ int i;
+ if (!lua_checkstack(L,argc)) fatal("too many input files");
+ for (i=0; i<argc; i++)
+ {
+ const char* filename=IS("-") ? NULL : argv[i];
+ if (luaL_loadfile(L,filename)!=0) fatal(lua_tostring(L,-1));
+ }
+ f=combine(L,argc);
+ if (listing) luaU_print(f,listing>1);
+ if (dumping)
+ {
+ FILE* D= (output==NULL) ? stdout : fopen(output,"wb");
+ if (D==NULL) cannot("open");
+ lua_lock(L);
+ luaU_dump(L,f,writer,D,stripping);
+ lua_unlock(L);
+ if (ferror(D)) cannot("write");
+ if (fclose(D)) cannot("close");
+ }
+ return 0;
+}
+
+int main(int argc, char* argv[])
+{
+ lua_State* L;
+ struct Smain s;
+ int i=doargs(argc,argv);
+ argc-=i; argv+=i;
+ if (argc<=0) usage("no input files given");
+ L=lua_open();
+ if (L==NULL) fatal("not enough memory for state");
+ s.argc=argc;
+ s.argv=argv;
+ if (lua_cpcall(L,pmain,&s)!=0) fatal(lua_tostring(L,-1));
+ lua_close(L);
+ return EXIT_SUCCESS;
+}
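
luac drives luaU_dump through the core, but the public API exposes the same writer-callback idea via lua_dump. The sketch below mirrors luac's writer(); the output name out.luac and the use of luaL_loadstring are our choices for illustration, not part of this file.

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"

/* lua_Writer compatible with the writer() above: 0 means success. */
static int file_writer (lua_State *L, const void *p, size_t sz, void *ud) {
  (void)L;
  return (fwrite(p, sz, 1, (FILE *)ud) != 1) && (sz != 0);
}

int main (void) {
  lua_State *L = luaL_newstate();
  FILE *out = fopen("out.luac", "wb");
  if (out != NULL && luaL_loadstring(L, "print('hi')") == 0)
    lua_dump(L, file_writer, out);    /* dump the compiled chunk on the top */
  if (out != NULL) fclose(out);
  lua_close(L);
  return 0;
}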
diff --git a/deps/lua/src/luaconf.h b/deps/lua/src/luaconf.h
new file mode 100644
index 0000000000000000000000000000000000000000..97a3e30c0eb1cf207125434c1bbda5966f0d209f
--- /dev/null
+++ b/deps/lua/src/luaconf.h
@@ -0,0 +1,736 @@
+/*
+** $Id: luaconf.h,v 1.81 2006/02/10 17:44:06 roberto Exp $
+** Configuration file for Lua
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lconfig_h
+#define lconfig_h
+
+#include <limits.h>
+#include <stddef.h>
+
+
+/*
+** ==================================================================
+** Search for "@@" to find all configurable definitions.
+** ===================================================================
+*/
+
+
+/*
+@@ LUA_ANSI controls the use of non-ansi features.
+** CHANGE it (define it) if you want Lua to avoid the use of any
+** non-ansi feature or library.
+*/
+#if defined(__STRICT_ANSI__)
+#define LUA_ANSI
+#endif
+
+
+#if !defined(LUA_ANSI) && defined(_WIN32)
+#define LUA_WIN
+#endif
+
+#if defined(LUA_USE_LINUX)
+#define LUA_USE_POSIX
+#define LUA_USE_DLOPEN /* needs an extra library: -ldl */
+#define LUA_USE_READLINE /* needs some extra libraries */
+#endif
+
+#if defined(LUA_USE_MACOSX)
+#define LUA_USE_POSIX
+#define LUA_DL_DYLD /* does not need extra library */
+#endif
+
+
+
+/*
+@@ LUA_USE_POSIX includes all functionality listed as X/Open System
+@* Interfaces Extension (XSI).
+** CHANGE it (define it) if your system is XSI compatible.
+*/
+#if defined(LUA_USE_POSIX)
+#define LUA_USE_MKSTEMP
+#define LUA_USE_ISATTY
+#define LUA_USE_POPEN
+#define LUA_USE_ULONGJMP
+#endif
+
+
+/*
+@@ LUA_PATH_DEFAULT is the default path that Lua uses to look for
+@* Lua libraries.
+@@ LUA_CPATH_DEFAULT is the default path that Lua uses to look for
+@* C libraries.
+** CHANGE them if your machine has a non-conventional directory
+** hierarchy or if you want to install your libraries in
+** non-conventional directories.
+*/
+#if defined(_WIN32)
+/*
+** In Windows, any exclamation mark ('!') in the path is replaced by the
+** path of the directory of the executable file of the current process.
+*/
+#define LUA_LDIR "!\\lua\\"
+#define LUA_CDIR "!\\"
+#define LUA_PATH_DEFAULT \
+ ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" \
+ LUA_CDIR"?.lua;" LUA_CDIR"?\\init.lua"
+#define LUA_CPATH_DEFAULT \
+ ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
+
+#else
+#define LUA_ROOT "/usr/local/"
+#define LUA_LDIR LUA_ROOT "share/lua/5.1/"
+#define LUA_CDIR LUA_ROOT "lib/lua/5.1/"
+#define LUA_PATH_DEFAULT \
+ "./?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua;" \
+ LUA_CDIR"?.lua;" LUA_CDIR"?/init.lua"
+#define LUA_CPATH_DEFAULT \
+ "./?.so;" LUA_CDIR"?.so;" LUA_CDIR"loadall.so"
+#endif
+
+
+/*
+@@ LUA_DIRSEP is the directory separator (for submodules).
+** CHANGE it if your machine does not use "/" as the directory separator
+** and is not Windows. (On Windows Lua automatically uses "\".)
+*/
+#if defined(_WIN32)
+#define LUA_DIRSEP "\\"
+#else
+#define LUA_DIRSEP "/"
+#endif
+
+
+/*
+@@ LUA_PATHSEP is the character that separates templates in a path.
+@@ LUA_PATH_MARK is the string that marks the substitution points in a
+@* template.
+@@ LUA_EXECDIR in a Windows path is replaced by the executable's
+@* directory.
+@@ LUA_IGMARK is a mark to ignore all before it when building the
+@* luaopen_ function name.
+** CHANGE them if for some reason your system cannot use those
+** characters. (E.g., if one of those characters is a common character
+** in file/directory names.) Probably you do not need to change them.
+*/
+#define LUA_PATHSEP ";"
+#define LUA_PATH_MARK "?"
+#define LUA_EXECDIR "!"
+#define LUA_IGMARK "-"
+
+
+/*
+@@ LUA_INTEGER is the integral type used by lua_pushinteger/lua_tointeger.
+** CHANGE that if ptrdiff_t is not adequate on your machine. (On most
+** machines, ptrdiff_t gives a good choice between int or long.)
+*/
+#define LUA_INTEGER ptrdiff_t
+
+
+/*
+@@ LUA_API is a mark for all core API functions.
+@@ LUALIB_API is a mark for all standard library functions.
+** CHANGE them if you need to define those functions in some special way.
+** For instance, if you want to create one Windows DLL with the core and
+** the libraries, you may want to use the following definition (define
+** LUA_BUILD_AS_DLL to get it).
+*/
+#if defined(LUA_BUILD_AS_DLL)
+
+#if defined(LUA_CORE) || defined(LUA_LIB)
+#define LUA_API __declspec(dllexport)
+#else
+#define LUA_API __declspec(dllimport)
+#endif
+
+#else
+
+#define LUA_API extern
+
+#endif
+
+/* more often than not the libs go together with the core */
+#define LUALIB_API LUA_API
+
+
+/*
+@@ LUAI_FUNC is a mark for all extern functions that are not to be
+@* exported to outside modules.
+@@ LUAI_DATA is a mark for all extern (const) variables that are not to
+@* be exported to outside modules.
+** CHANGE them if you need to mark them in some special way. Elf/gcc
+** (versions 3.2 and later) mark them as "hidden" to optimize access
+** when Lua is compiled as a shared library.
+*/
+#if defined(luaall_c)
+#define LUAI_FUNC static
+#define LUAI_DATA /* empty */
+
+#elif defined(__GNUC__) && ((__GNUC__*100 + __GNUC_MINOR__) >= 302) && \
+ defined(__ELF__)
+#define LUAI_FUNC __attribute__((visibility("hidden"))) extern
+#define LUAI_DATA LUAI_FUNC
+
+#else
+#define LUAI_FUNC extern
+#define LUAI_DATA extern
+#endif
+
+
+
+/*
+@@ LUA_QL describes how error messages quote program elements.
+** CHANGE it if you want a different appearance.
+*/
+#define LUA_QL(x) "'" x "'"
+#define LUA_QS LUA_QL("%s")
+
+
+/*
+@@ LUA_IDSIZE gives the maximum size for the description of the source
+@* of a function in debug information.
+** CHANGE it if you want a different size.
+*/
+#define LUA_IDSIZE 60
+
+
+/*
+** {==================================================================
+** Stand-alone configuration
+** ===================================================================
+*/
+
+#if defined(lua_c) || defined(luaall_c)
+
+/*
+@@ lua_stdin_is_tty detects whether the standard input is a 'tty' (that
+@* is, whether we're running lua interactively).
+** CHANGE it if you have a better definition for non-POSIX/non-Windows
+** systems.
+*/
+#if defined(LUA_USE_ISATTY)
+#include <unistd.h>
+#define lua_stdin_is_tty() isatty(0)
+#elif defined(LUA_WIN)
+#include <io.h>
+#include <stdio.h>
+#define lua_stdin_is_tty() _isatty(_fileno(stdin))
+#else
+#define lua_stdin_is_tty() 1 /* assume stdin is a tty */
+#endif
+
+
+/*
+@@ LUA_PROMPT is the default prompt used by stand-alone Lua.
+@@ LUA_PROMPT2 is the default continuation prompt used by stand-alone Lua.
+** CHANGE them if you want different prompts. (You can also change the
+** prompts dynamically, assigning to globals _PROMPT/_PROMPT2.)
+*/
+#define LUA_PROMPT "> "
+#define LUA_PROMPT2 ">> "
+
+
+/*
+@@ LUA_PROGNAME is the default name for the stand-alone Lua program.
+** CHANGE it if your stand-alone interpreter has a different name and
+** your system is not able to detect that name automatically.
+*/
+#define LUA_PROGNAME "lua"
+
+
+/*
+@@ LUA_MAXINPUT is the maximum length for an input line in the
+@* stand-alone interpreter.
+** CHANGE it if you need longer lines.
+*/
+#define LUA_MAXINPUT 512
+
+
+/*
+@@ lua_readline defines how to show a prompt and then read a line from
+@* the standard input.
+@@ lua_saveline defines how to "save" a read line in a "history".
+@@ lua_freeline defines how to free a line read by lua_readline.
+** CHANGE them if you want to improve this functionality (e.g., by using
+** GNU readline and history facilities).
+*/
+#if defined(LUA_USE_READLINE)
+#include <stdio.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#define lua_readline(L,b,p) ((void)L, ((b)=readline(p)) != NULL)
+#define lua_saveline(L,idx) \
+ if (lua_strlen(L,idx) > 0) /* non-empty line? */ \
+ add_history(lua_tostring(L, idx)); /* add it to history */
+#define lua_freeline(L,b) ((void)L, free(b))
+#else
+#define lua_readline(L,b,p) \
+ ((void)L, fputs(p, stdout), fflush(stdout), /* show prompt */ \
+ fgets(b, LUA_MAXINPUT, stdin) != NULL) /* get line */
+#define lua_saveline(L,idx) { (void)L; (void)idx; }
+#define lua_freeline(L,b) { (void)L; (void)b; }
+#endif
+
+#endif
+
+/* }================================================================== */
+
+
+/*
+@@ LUAI_GCPAUSE defines the default pause between garbage-collector cycles
+@* as a percentage.
+** CHANGE it if you want the GC to run faster or slower (higher values
+** mean larger pauses which mean slower collection.) You can also change
+** this value dynamically.
+*/
+#define LUAI_GCPAUSE 200 /* 200% (wait memory to double before next GC) */
+
+
+/*
+@@ LUAI_GCMUL defines the default speed of garbage collection relative to
+@* memory allocation as a percentage.
+** CHANGE it if you want to change the granularity of the garbage
+** collection. (Higher values mean coarser collections. 0 represents
+** infinity, where each step performs a full collection.) You can also
+** change this value dynamically.
+*/
+#define LUAI_GCMUL 200 /* GC runs 'twice the speed' of memory allocation */
+
+
+
+/*
+@@ LUA_COMPAT_GETN controls compatibility with old getn behavior.
+** CHANGE it (define it) if you want exact compatibility with the
+** behavior of setn/getn in Lua 5.0.
+*/
+#undef LUA_COMPAT_GETN
+
+/*
+@@ LUA_COMPAT_LOADLIB controls compatibility about global loadlib.
+** CHANGE it to undefined as soon as you do not need a global 'loadlib'
+** function (the function is still available as 'package.loadlib').
+*/
+#undef LUA_COMPAT_LOADLIB
+
+/*
+@@ LUA_COMPAT_VARARG controls compatibility with old vararg feature.
+** CHANGE it to undefined as soon as your programs use only '...' to
+** access vararg parameters (instead of the old 'arg' table).
+*/
+#define LUA_COMPAT_VARARG
+
+/*
+@@ LUA_COMPAT_MOD controls compatibility with old math.mod function.
+** CHANGE it to undefined as soon as your programs use 'math.fmod' or
+** the new '%' operator instead of 'math.mod'.
+*/
+#define LUA_COMPAT_MOD
+
+/*
+@@ LUA_COMPAT_LSTR controls compatibility with old long string nesting
+@* facility.
+** CHANGE it to 2 if you want the old behaviour, or undefine it to turn
+** off the advisory error when nesting [[...]].
+*/
+#define LUA_COMPAT_LSTR 1
+
+/*
+@@ LUA_COMPAT_GFIND controls compatibility with old 'string.gfind' name.
+** CHANGE it to undefined as soon as you rename 'string.gfind' to
+** 'string.gmatch'.
+*/
+#define LUA_COMPAT_GFIND
+
+/*
+@@ LUA_COMPAT_OPENLIB controls compatibility with old 'luaL_openlib'
+@* behavior.
+** CHANGE it to undefined as soon as you replace your uses of
+** 'luaL_openlib' with 'luaL_register'.
+*/
+#define LUA_COMPAT_OPENLIB
+
+
+
+/*
+@@ luai_apicheck is the assert macro used by the Lua-C API.
+** CHANGE luai_apicheck if you want Lua to perform some checks in the
+** parameters it gets from API calls. This may slow down the interpreter
+** a bit, but may be quite useful when debugging C code that interfaces
+** with Lua. A useful redefinition is to use assert.h.
+*/
+#if defined(LUA_USE_APICHECK)
+#include <assert.h>
+#define luai_apicheck(L,o) { (void)L; assert(o); }
+#else
+#define luai_apicheck(L,o) { (void)L; }
+#endif
+
+
+/*
+@@ LUAI_BITSINT defines the number of bits in an int.
+** CHANGE here if Lua cannot automatically detect the number of bits of
+** your machine. Probably you do not need to change this.
+*/
+/* avoid overflows in comparison */
+#if INT_MAX-20 < 32760
+#define LUAI_BITSINT 16
+#elif INT_MAX > 2147483640L
+/* int has at least 32 bits */
+#define LUAI_BITSINT 32
+#else
+#error "you must define LUA_BITSINT with number of bits in an integer"
+#endif
+
+
+/*
+@@ LUAI_UINT32 is an unsigned integer with at least 32 bits.
+@@ LUAI_INT32 is a signed integer with at least 32 bits.
+@@ LUAI_UMEM is an unsigned integer big enough to count the total
+@* memory used by Lua.
+@@ LUAI_MEM is a signed integer big enough to count the total memory
+@* used by Lua.
+** CHANGE here if for some weird reason the default definitions are not
+** good enough for your machine. (The definitions in the 'else'
+** part always work, but may waste space on machines with 64-bit
+** longs.) Probably you do not need to change this.
+*/
+#if LUAI_BITSINT >= 32
+#define LUAI_UINT32 unsigned int
+#define LUAI_INT32 int
+#define LUAI_MAXINT32 INT_MAX
+#define LUAI_UMEM size_t
+#define LUAI_MEM ptrdiff_t
+#else
+/* 16-bit ints */
+#define LUAI_UINT32 unsigned long
+#define LUAI_INT32 long
+#define LUAI_MAXINT32 LONG_MAX
+#define LUAI_UMEM unsigned long
+#define LUAI_MEM long
+#endif
+
+
+/*
+@@ LUAI_MAXCALLS limits the number of nested calls.
+** CHANGE it if you need really deep recursive calls. This limit is
+** arbitrary; its only purpose is to stop infinite recursion before
+** exhausting memory.
+*/
+#define LUAI_MAXCALLS 20000
+
+
+/*
+@@ LUAI_MAXCSTACK limits the number of Lua stack slots that a C function
+@* can use.
+** CHANGE it if you need lots of (Lua) stack space for your C
+** functions. This limit is arbitrary; its only purpose is to stop C
+** functions to consume unlimited stack space.
+*/
+#define LUAI_MAXCSTACK 2048
+
+
+
+/*
+** {==================================================================
+** CHANGE (to smaller values) the following definitions if your system
+** has a small C stack. (Or you may want to change them to larger
+** values if your system has a large C stack and these limits are
+** too rigid for you.) Some of these constants control the size of
+** stack-allocated arrays used by the compiler or the interpreter, while
+** others limit the maximum number of recursive calls that the compiler
+** or the interpreter can perform. Values too large may cause a C stack
+** overflow for some forms of deep constructs.
+** ===================================================================
+*/
+
+
+/*
+@@ LUAI_MAXCCALLS is the maximum depth for nested C calls (short) and
+@* syntactical nested non-terminals in a program.
+*/
+#define LUAI_MAXCCALLS 200
+
+
+/*
+@@ LUAI_MAXVARS is the maximum number of local variables per function
+@* (must be smaller than 250).
+*/
+#define LUAI_MAXVARS 200
+
+
+/*
+@@ LUAI_MAXUPVALUES is the maximum number of upvalues per function
+@* (must be smaller than 250).
+*/
+#define LUAI_MAXUPVALUES 60
+
+
+/*
+@@ LUAL_BUFFERSIZE is the buffer size used by the lauxlib buffer system.
+*/
+#define LUAL_BUFFERSIZE BUFSIZ
+
+/* }================================================================== */
+
+
+
+
+/*
+** {==================================================================
+@@ LUA_NUMBER is the type of numbers in Lua.
+** CHANGE the following definitions only if you want to build Lua
+** with a number type different from double. You may also need to
+** change lua_number2int & lua_number2integer.
+** ===================================================================
+*/
+
+#define LUA_NUMBER_DOUBLE
+#define LUA_NUMBER double
+
+/*
+@@ LUAI_UACNUMBER is the result of an 'usual argument conversion'
+@* over a number.
+*/
+#define LUAI_UACNUMBER double
+
+
+/*
+@@ LUA_NUMBER_SCAN is the format for reading numbers.
+@@ LUA_NUMBER_FMT is the format for writing numbers.
+@@ lua_number2str converts a number to a string.
+@@ LUAI_MAXNUMBER2STR is maximum size of previous conversion.
+@@ lua_str2number converts a string to a number.
+*/
+#define LUA_NUMBER_SCAN "%lf"
+#define LUA_NUMBER_FMT "%.14g"
+#define lua_number2str(s,n) sprintf((s), LUA_NUMBER_FMT, (n))
+#define LUAI_MAXNUMBER2STR 32 /* 16 digits, sign, point, and \0 */
+#define lua_str2number(s,p) strtod((s), (p))
+
+
+/*
+@@ The luai_num* macros define the primitive operations over numbers.
+*/
+#if defined(LUA_CORE)
+#include <math.h>
+#define luai_numadd(a,b) ((a)+(b))
+#define luai_numsub(a,b) ((a)-(b))
+#define luai_nummul(a,b) ((a)*(b))
+#define luai_numdiv(a,b) ((a)/(b))
+#define luai_nummod(a,b) ((a) - floor((a)/(b))*(b))
+#define luai_numpow(a,b) (pow(a,b))
+#define luai_numunm(a) (-(a))
+#define luai_numeq(a,b) ((a)==(b))
+#define luai_numlt(a,b) ((a)<(b))
+#define luai_numle(a,b) ((a)<=(b))
+#define luai_numisnan(a) (!luai_numeq((a), (a)))
+#endif
+
+
+/*
+@@ lua_number2int is a macro to convert lua_Number to int.
+@@ lua_number2integer is a macro to convert lua_Number to lua_Integer.
+** CHANGE them if you know a faster way to convert a lua_Number to
+** int (with any rounding method and without throwing errors) in your
+** system. In Pentium machines, a naive typecast from double to int
+** in C is extremely slow, so any alternative is worth trying.
+*/
+
+/* On a Pentium, resort to a trick */
+#if defined(LUA_NUMBER_DOUBLE) && !defined(LUA_ANSI) && !defined(__SSE2__) && \
+ (defined(__i386) || defined (_M_IX86) || defined(__i386__))
+union luai_Cast { double l_d; long l_l; };
+#define lua_number2int(i,d) \
+ { volatile union luai_Cast u; u.l_d = (d) + 6755399441055744.0; (i) = u.l_l; }
+#define lua_number2integer(i,n) lua_number2int(i, n)
+
+/* this option always works, but may be slow */
+#else
+#define lua_number2int(i,d) ((i)=(int)(d))
+#define lua_number2integer(i,d) ((i)=(lua_Integer)(d))
+
+#endif
+
+/* }================================================================== */
+
+
+/*
+@@ LUAI_USER_ALIGNMENT_T is a type that requires maximum alignment.
+** CHANGE it if your system requires alignments larger than double. (For
+** instance, if your system supports long doubles and they must be
+** aligned in 16-byte boundaries, then you should add long double in the
+** union.) Probably you do not need to change this.
+*/
+#define LUAI_USER_ALIGNMENT_T union { double u; void *s; long l; }
+
+
+/*
+@@ LUAI_THROW/LUAI_TRY define how Lua does exception handling.
+** CHANGE them if you prefer to use longjmp/setjmp even with C++
+** or if want/don't to use _longjmp/_setjmp instead of regular
+** longjmp/setjmp. By default, Lua handles errors with exceptions when
+** compiling as C++ code, with _longjmp/_setjmp when asked to use them,
+** and with longjmp/setjmp otherwise.
+*/
+#if defined(__cplusplus)
+/* C++ exceptions */
+#define LUAI_THROW(L,c) throw(c)
+#define LUAI_TRY(L,c,a) try { a } catch(...) \
+ { if ((c)->status == 0) (c)->status = -1; }
+#define luai_jmpbuf int /* dummy variable */
+
+#elif defined(LUA_USE_ULONGJMP)
+/* in Unix, try _longjmp/_setjmp (more efficient) */
+#define LUAI_THROW(L,c) _longjmp((c)->b, 1)
+#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a }
+#define luai_jmpbuf jmp_buf
+
+#else
+/* default handling with long jumps */
+#define LUAI_THROW(L,c) longjmp((c)->b, 1)
+#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a }
+#define luai_jmpbuf jmp_buf
+
+#endif
+
+
+/*
+@@ LUA_MAXCAPTURES is the maximum number of captures that a pattern
+@* can do during pattern-matching.
+** CHANGE it if you need more captures. This limit is arbitrary.
+*/
+#define LUA_MAXCAPTURES 32
+
+
+/*
+@@ lua_tmpnam is the function that the OS library uses to create a
+@* temporary name.
+@@ LUA_TMPNAMBUFSIZE is the maximum size of a name created by lua_tmpnam.
+** CHANGE them if you have an alternative to tmpnam (which is considered
+** insecure) or if you want the original tmpnam anyway. By default, Lua
+** uses tmpnam except when POSIX is available, where it uses mkstemp.
+*/
+#if defined(loslib_c) || defined(luaall_c)
+
+#if defined(LUA_USE_MKSTEMP)
+#include <unistd.h>
+#define LUA_TMPNAMBUFSIZE 32
+#define lua_tmpnam(b,e) { \
+ strcpy(b, "/tmp/lua_XXXXXX"); \
+ e = mkstemp(b); \
+ if (e != -1) close(e); \
+ e = (e == -1); }
+
+#else
+#define LUA_TMPNAMBUFSIZE L_tmpnam
+#define lua_tmpnam(b,e) { e = (tmpnam(b) == NULL); }
+#endif
+
+#endif
+
+
+/*
+@@ lua_popen spawns a new process connected to the current one through
+@* the file streams.
+** CHANGE it if you have a way to implement it in your system.
+*/
+#if defined(LUA_USE_POPEN)
+
+#define lua_popen(L,c,m) ((void)L, popen(c,m))
+#define lua_pclose(L,file) ((void)L, (pclose(file) != -1))
+
+#elif defined(LUA_WIN)
+
+#define lua_popen(L,c,m) ((void)L, _popen(c,m))
+#define lua_pclose(L,file) ((void)L, (_pclose(file) != -1))
+
+#else
+
+#define lua_popen(L,c,m) ((void)((void)c, m), \
+ luaL_error(L, LUA_QL("popen") " not supported"), (FILE*)0)
+#define lua_pclose(L,file) ((void)((void)L, file), 0)
+
+#endif
+
+/*
+@@ LUA_DL_* define which dynamic-library system Lua should use.
+** CHANGE here if Lua has problems choosing the appropriate
+** dynamic-library system for your platform (either Windows' DLL, Mac's
+** dyld, or Unix's dlopen). If your system is some kind of Unix, there
+** is a good chance that it has dlopen, so LUA_DL_DLOPEN will work for
+** it. To use dlopen you also need to adapt the src/Makefile (probably
+** adding -ldl to the linker options), so Lua does not select it
+** automatically. (When you change the makefile to add -ldl, you must
+** also add -DLUA_USE_DLOPEN.)
+** If you do not want any kind of dynamic library, undefine all these
+** options.
+** By default, _WIN32 gets LUA_DL_DLL and MAC OS X gets LUA_DL_DYLD.
+*/
+#if defined(LUA_USE_DLOPEN)
+#define LUA_DL_DLOPEN
+#endif
+
+#if defined(LUA_WIN)
+#define LUA_DL_DLL
+#endif
+
+
+/*
+@@ LUAI_EXTRASPACE allows you to add user-specific data in a lua_State
+@* (the data goes just *before* the lua_State pointer).
+** CHANGE (define) this if you really need that. This value must be
+** a multiple of the maximum alignment required for your machine.
+*/
+#define LUAI_EXTRASPACE 0
+
+
+/*
+@@ luai_userstate* allow user-specific actions on threads.
+** CHANGE them if you defined LUAI_EXTRASPACE and need to do something
+** extra when a thread is created/deleted/resumed/yielded.
+*/
+#define luai_userstateopen(L) ((void)L)
+#define luai_userstateclose(L) ((void)L)
+#define luai_userstatethread(L,L1) ((void)L)
+#define luai_userstatefree(L) ((void)L)
+#define luai_userstateresume(L,n) ((void)L)
+#define luai_userstateyield(L,n) ((void)L)
+
+
+/*
+@@ LUA_INTFRMLEN is the length modifier for integer conversions
+@* in 'string.format'.
+@@ LUA_INTFRM_T is the integer type corresponding to the previous length
+@* modifier.
+** CHANGE them if your system supports long long or does not support long.
+*/
+
+#if defined(LUA_USELONGLONG)
+
+#define LUA_INTFRMLEN "ll"
+#define LUA_INTFRM_T long long
+
+#else
+
+#define LUA_INTFRMLEN "l"
+#define LUA_INTFRM_T long
+
+#endif
+
+
+
+/* =================================================================== */
+
+/*
+** Local configuration. You can use this space to add your redefinitions
+** without modifying the main part of the file.
+*/
+
+
+
+#endif
+
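
The lua_number2int macro above relies on adding 2^52 + 2^51 so that the rounded integer part of a double ends up in the low 32 bits of its mantissa. The standalone demonstration below is ours and only behaves as described on little-endian machines with IEEE-754 doubles, the same situation the macro guards for with its __i386 checks.

#include <stdio.h>

/* Mirrors the lua_number2int trick: adding 2^52 + 2^51 leaves the rounded
   integer in the low 32 bits of the double's mantissa. */
union num_cast { double d; long l; };

static int fast_number2int (double d) {
  volatile union num_cast u;
  u.d = d + 6755399441055744.0;
  return (int)u.l;
}

int main (void) {
  printf("%d %d %d\n", fast_number2int(42.25),
         fast_number2int(-7.75), fast_number2int(100000.0));
  return 0;
}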
diff --git a/deps/lua/src/lualib.h b/deps/lua/src/lualib.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c76232c0dd03737542483119723ebc2ae779c57
--- /dev/null
+++ b/deps/lua/src/lualib.h
@@ -0,0 +1,53 @@
+/*
+** $Id: lualib.h,v 1.36 2005/12/27 17:12:00 roberto Exp $
+** Lua standard libraries
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lualib_h
+#define lualib_h
+
+#include "lua.h"
+
+
+/* Key to file-handle type */
+#define LUA_FILEHANDLE "FILE*"
+
+
+#define LUA_COLIBNAME "coroutine"
+LUALIB_API int (luaopen_base) (lua_State *L);
+
+#define LUA_TABLIBNAME "table"
+LUALIB_API int (luaopen_table) (lua_State *L);
+
+#define LUA_IOLIBNAME "io"
+LUALIB_API int (luaopen_io) (lua_State *L);
+
+#define LUA_OSLIBNAME "os"
+LUALIB_API int (luaopen_os) (lua_State *L);
+
+#define LUA_STRLIBNAME "string"
+LUALIB_API int (luaopen_string) (lua_State *L);
+
+#define LUA_MATHLIBNAME "math"
+LUALIB_API int (luaopen_math) (lua_State *L);
+
+#define LUA_DBLIBNAME "debug"
+LUALIB_API int (luaopen_debug) (lua_State *L);
+
+#define LUA_LOADLIBNAME "package"
+LUALIB_API int (luaopen_package) (lua_State *L);
+
+
+/* open all previous libraries */
+LUALIB_API void (luaL_openlibs) (lua_State *L);
+
+
+
+#ifndef lua_assert
+#define lua_assert(x) ((void)0)
+#endif
+
+
+#endif
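
luaL_openlibs opens everything declared above, but a host can also open a single library by calling its luaopen_* function through the stack. A short sketch of that, ours and not part of the patch:

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

int main (void) {
  lua_State *L = luaL_newstate();
  /* open only the table library, instead of everything via luaL_openlibs */
  lua_pushcfunction(L, luaopen_table);
  lua_pushstring(L, LUA_TABLIBNAME);   /* library name as its single argument */
  lua_call(L, 1, 0);
  lua_getglobal(L, "table");
  printf("table library loaded: %d\n", lua_istable(L, -1));
  lua_close(L);
  return 0;
}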
diff --git a/deps/lua/src/lundump.c b/deps/lua/src/lundump.c
new file mode 100644
index 0000000000000000000000000000000000000000..7fc635eeb7bebb9ba08ca0151ce24bf0c0acb910
--- /dev/null
+++ b/deps/lua/src/lundump.c
@@ -0,0 +1,223 @@
+/*
+** $Id: lundump.c,v 1.60 2006/02/16 15:53:49 lhf Exp $
+** load precompiled Lua chunks
+** See Copyright Notice in lua.h
+*/
+
+#include <string.h>
+
+#define lundump_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lstring.h"
+#include "lundump.h"
+#include "lzio.h"
+
+typedef struct {
+ lua_State* L;
+ ZIO* Z;
+ Mbuffer* b;
+ const char* name;
+} LoadState;
+
+#ifdef LUAC_TRUST_BINARIES
+#define IF(c,s)
+#else
+#define IF(c,s) if (c) error(S,s)
+
+static void error(LoadState* S, const char* why)
+{
+ luaO_pushfstring(S->L,"%s: %s in precompiled chunk",S->name,why);
+ luaD_throw(S->L,LUA_ERRSYNTAX);
+}
+#endif
+
+#define LoadMem(S,b,n,size) LoadBlock(S,b,(n)*(size))
+#define LoadByte(S) (lu_byte)LoadChar(S)
+#define LoadVar(S,x) LoadMem(S,&x,1,sizeof(x))
+#define LoadVector(S,b,n,size) LoadMem(S,b,n,size)
+
+static void LoadBlock(LoadState* S, void* b, size_t size)
+{
+ size_t r=luaZ_read(S->Z,b,size);
+ IF (r!=0, "unexpected end");
+}
+
+static int LoadChar(LoadState* S)
+{
+ char x;
+ LoadVar(S,x);
+ return x;
+}
+
+static int LoadInt(LoadState* S)
+{
+ int x;
+ LoadVar(S,x);
+ IF (x<0, "bad integer");
+ return x;
+}
+
+static lua_Number LoadNumber(LoadState* S)
+{
+ lua_Number x;
+ LoadVar(S,x);
+ return x;
+}
+
+static TString* LoadString(LoadState* S)
+{
+ size_t size;
+ LoadVar(S,size);
+ if (size==0)
+ return NULL;
+ else
+ {
+ char* s=luaZ_openspace(S->L,S->b,size);
+ LoadBlock(S,s,size);
+ return luaS_newlstr(S->L,s,size-1); /* remove trailing '\0' */
+ }
+}
+
+static void LoadCode(LoadState* S, Proto* f)
+{
+ int n=LoadInt(S);
+ f->code=luaM_newvector(S->L,n,Instruction);
+ f->sizecode=n;
+ LoadVector(S,f->code,n,sizeof(Instruction));
+}
+
+static Proto* LoadFunction(LoadState* S, TString* p);
+
+static void LoadConstants(LoadState* S, Proto* f)
+{
+ int i,n;
+ n=LoadInt(S);
+ f->k=luaM_newvector(S->L,n,TValue);
+ f->sizek=n;
+ for (i=0; i<n; i++) setnilvalue(&f->k[i]);
+ for (i=0; i<n; i++)
+ {
+ TValue* o=&f->k[i];
+ int t=LoadChar(S);
+ switch (t)
+ {
+ case LUA_TNIL:
+ setnilvalue(o);
+ break;
+ case LUA_TBOOLEAN:
+ setbvalue(o,LoadChar(S));
+ break;
+ case LUA_TNUMBER:
+ setnvalue(o,LoadNumber(S));
+ break;
+ case LUA_TSTRING:
+ setsvalue2n(S->L,o,LoadString(S));
+ break;
+ default:
+ IF (1, "bad constant");
+ break;
+ }
+ }
+ n=LoadInt(S);
+ f->p=luaM_newvector(S->L,n,Proto*);
+ f->sizep=n;
+ for (i=0; i<n; i++) f->p[i]=NULL;
+ for (i=0; i<n; i++) f->p[i]=LoadFunction(S,f->source);
+}
+
+static void LoadDebug(LoadState* S, Proto* f)
+{
+ int i,n;
+ n=LoadInt(S);
+ f->lineinfo=luaM_newvector(S->L,n,int);
+ f->sizelineinfo=n;
+ LoadVector(S,f->lineinfo,n,sizeof(int));
+ n=LoadInt(S);
+ f->locvars=luaM_newvector(S->L,n,LocVar);
+ f->sizelocvars=n;
+ for (i=0; i<n; i++) f->locvars[i].varname=NULL;
+ for (i=0; i<n; i++)
+ {
+ f->locvars[i].varname=LoadString(S);
+ f->locvars[i].startpc=LoadInt(S);
+ f->locvars[i].endpc=LoadInt(S);
+ }
+ n=LoadInt(S);
+ f->upvalues=luaM_newvector(S->L,n,TString*);
+ f->sizeupvalues=n;
+ for (i=0; i<n; i++) f->upvalues[i]=NULL;
+ for (i=0; i<n; i++) f->upvalues[i]=LoadString(S);
+}
+
+static Proto* LoadFunction(LoadState* S, TString* p)
+{
+ Proto* f=luaF_newproto(S->L);
+ setptvalue2s(S->L,S->L->top,f); incr_top(S->L);
+ f->source=LoadString(S); if (f->source==NULL) f->source=p;
+ f->linedefined=LoadInt(S);
+ f->lastlinedefined=LoadInt(S);
+ f->nups=LoadByte(S);
+ f->numparams=LoadByte(S);
+ f->is_vararg=LoadByte(S);
+ f->maxstacksize=LoadByte(S);
+ LoadCode(S,f);
+ LoadConstants(S,f);
+ LoadDebug(S,f);
+ IF (!luaG_checkcode(f), "bad code");
+ S->L->top--;
+ return f;
+}
+
+static void LoadHeader(LoadState* S)
+{
+ char h[LUAC_HEADERSIZE];
+ char s[LUAC_HEADERSIZE];
+ luaU_header(h);
+ LoadBlock(S,s,LUAC_HEADERSIZE);
+ IF (memcmp(h,s,LUAC_HEADERSIZE)!=0, "bad header");
+}
+
+/*
+** load precompiled chunk
+*/
+Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name)
+{
+ LoadState S;
+ if (*name=='@' || *name=='=')
+ S.name=name+1;
+ else if (*name==LUA_SIGNATURE[0])
+ S.name="binary string";
+ else
+ S.name=name;
+ S.L=L;
+ S.Z=Z;
+ S.b=buff;
+ LoadHeader(&S);
+ return LoadFunction(&S,luaS_newliteral(L,"=?"));
+}
+
+/*
+* make header
+*/
+void luaU_header (char* h)
+{
+ int x=1;
+ memcpy(h,LUA_SIGNATURE,sizeof(LUA_SIGNATURE)-1);
+ h+=sizeof(LUA_SIGNATURE)-1;
+ *h++=(char)LUAC_VERSION;
+ *h++=(char)LUAC_FORMAT;
+ *h++=(char)*(char*)&x; /* endianness */
+ *h++=(char)sizeof(int);
+ *h++=(char)sizeof(size_t);
+ *h++=(char)sizeof(Instruction);
+ *h++=(char)sizeof(lua_Number);
+ *h++=(char)(((lua_Number)0.5)==0); /* is lua_Number integral? */
+}
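
LoadHeader() above simply memcmp()s the first LUAC_HEADERSIZE bytes of a chunk against the header built by luaU_header(), so a chunk compiled with a different endianness, word size, or lua_Number type is rejected as "bad header". The sketch below shows what those 12 bytes look like on one common configuration; everything past the signature, version, and format bytes is an assumption about the build (little-endian, 4-byte int, 8-byte size_t, 4-byte Instruction, double lua_Number), not a fixed constant.

```c
/* Minimal sketch: expected luaU_header() output on a little-endian 64-bit
** build with double lua_Number. The size/endianness bytes are assumptions
** about the platform, not part of the format itself. */
#include <stdio.h>

int main(void) {
  const unsigned char header[12] = {
    0x1B, 'L', 'u', 'a',  /* LUA_SIGNATURE "\033Lua" */
    0x51,                 /* LUAC_VERSION: Lua 5.1 */
    0x00,                 /* LUAC_FORMAT: official format */
    0x01,                 /* endianness flag: 1 = little-endian */
    4,                    /* sizeof(int) */
    8,                    /* sizeof(size_t) */
    8,                    /* sizeof(size_t) is 8 on LP64; sizeof(Instruction) follows */
    8                     /* sizeof(lua_Number): double */
  };
  /* Corrected layout: bytes 8..11 are sizeof(int)=4, sizeof(size_t)=8,
  ** sizeof(Instruction)=4, sizeof(lua_Number)=8, then integral flag 0. */
  const unsigned char expected[12] = {
    0x1B, 'L', 'u', 'a', 0x51, 0x00, 0x01, 4, 8, 4, 8, 0
  };
  (void)header;
  for (int i = 0; i < 12; i++) printf("%02X ", expected[i]);
  printf("\n");
  return 0;
}
```

Any mismatch in these bytes makes luaU_undump() reject the chunk, which is why precompiled chunks are not portable across platforms.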
diff --git a/deps/lua/src/lundump.h b/deps/lua/src/lundump.h
new file mode 100644
index 0000000000000000000000000000000000000000..58cca5d19083a235e835566f1867104f011aa70d
--- /dev/null
+++ b/deps/lua/src/lundump.h
@@ -0,0 +1,36 @@
+/*
+** $Id: lundump.h,v 1.40 2005/11/11 14:03:13 lhf Exp $
+** load precompiled Lua chunks
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lundump_h
+#define lundump_h
+
+#include "lobject.h"
+#include "lzio.h"
+
+/* load one chunk; from lundump.c */
+LUAI_FUNC Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name);
+
+/* make header; from lundump.c */
+LUAI_FUNC void luaU_header (char* h);
+
+/* dump one chunk; from ldump.c */
+LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip);
+
+#ifdef luac_c
+/* print one chunk; from print.c */
+LUAI_FUNC void luaU_print (const Proto* f, int full);
+#endif
+
+/* for header of binary files -- this is Lua 5.1 */
+#define LUAC_VERSION 0x51
+
+/* for header of binary files -- this is the official format */
+#define LUAC_FORMAT 0
+
+/* size of header of binary files */
+#define LUAC_HEADERSIZE 12
+
+#endif
diff --git a/deps/lua/src/lvm.c b/deps/lua/src/lvm.c
new file mode 100644
index 0000000000000000000000000000000000000000..6f4c0291c970a0c49085797ef2e87565fada2cd3
--- /dev/null
+++ b/deps/lua/src/lvm.c
@@ -0,0 +1,762 @@
+/*
+** $Id: lvm.c,v 2.62 2006/01/23 19:51:43 roberto Exp $
+** Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lvm_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lgc.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+#include "lvm.h"
+
+
+
+/* limit for table tag-method chains (to avoid loops) */
+#define MAXTAGLOOP 100
+
+
+const TValue *luaV_tonumber (const TValue *obj, TValue *n) {
+ lua_Number num;
+ if (ttisnumber(obj)) return obj;
+ if (ttisstring(obj) && luaO_str2d(svalue(obj), &num)) {
+ setnvalue(n, num);
+ return n;
+ }
+ else
+ return NULL;
+}
+
+
+int luaV_tostring (lua_State *L, StkId obj) {
+ if (!ttisnumber(obj))
+ return 0;
+ else {
+ char s[LUAI_MAXNUMBER2STR];
+ lua_Number n = nvalue(obj);
+ lua_number2str(s, n);
+ setsvalue2s(L, obj, luaS_new(L, s));
+ return 1;
+ }
+}
+
+
+static void traceexec (lua_State *L, const Instruction *pc) {
+ lu_byte mask = L->hookmask;
+ const Instruction *oldpc = L->savedpc;
+ L->savedpc = pc;
+ if (mask > LUA_MASKLINE) { /* instruction-hook set? */
+ if (L->hookcount == 0) {
+ resethookcount(L);
+ luaD_callhook(L, LUA_HOOKCOUNT, -1);
+ }
+ }
+ if (mask & LUA_MASKLINE) {
+ Proto *p = ci_func(L->ci)->l.p;
+ int npc = pcRel(pc, p);
+ int newline = getline(p, npc);
+ /* call linehook when enter a new function, when jump back (loop),
+ or when enter a new line */
+ if (npc == 0 || pc <= oldpc || newline != getline(p, pcRel(oldpc, p)))
+ luaD_callhook(L, LUA_HOOKLINE, newline);
+ }
+}
+
+
+static void callTMres (lua_State *L, StkId res, const TValue *f,
+ const TValue *p1, const TValue *p2) {
+ ptrdiff_t result = savestack(L, res);
+ setobj2s(L, L->top, f); /* push function */
+ setobj2s(L, L->top+1, p1); /* 1st argument */
+ setobj2s(L, L->top+2, p2); /* 2nd argument */
+ luaD_checkstack(L, 3);
+ L->top += 3;
+ luaD_call(L, L->top - 3, 1);
+ res = restorestack(L, result);
+ L->top--;
+ setobjs2s(L, res, L->top);
+}
+
+
+
+static void callTM (lua_State *L, const TValue *f, const TValue *p1,
+ const TValue *p2, const TValue *p3) {
+ setobj2s(L, L->top, f); /* push function */
+ setobj2s(L, L->top+1, p1); /* 1st argument */
+ setobj2s(L, L->top+2, p2); /* 2nd argument */
+ setobj2s(L, L->top+3, p3); /* 3th argument */
+ luaD_checkstack(L, 4);
+ L->top += 4;
+ luaD_call(L, L->top - 4, 0);
+}
+
+
+void luaV_gettable (lua_State *L, const TValue *t, TValue *key, StkId val) {
+ int loop;
+ for (loop = 0; loop < MAXTAGLOOP; loop++) {
+ const TValue *tm;
+ if (ttistable(t)) { /* `t' is a table? */
+ Table *h = hvalue(t);
+ const TValue *res = luaH_get(h, key); /* do a primitive get */
+ if (!ttisnil(res) || /* result is no nil? */
+ (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL) { /* or no TM? */
+ setobj2s(L, val, res);
+ return;
+ }
+ /* else will try the tag method */
+ }
+ else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX)))
+ luaG_typeerror(L, t, "index");
+ if (ttisfunction(tm)) {
+ callTMres(L, val, tm, t, key);
+ return;
+ }
+ t = tm; /* else repeat with `tm' */
+ }
+ luaG_runerror(L, "loop in gettable");
+}
+
+
+void luaV_settable (lua_State *L, const TValue *t, TValue *key, StkId val) {
+ int loop;
+ for (loop = 0; loop < MAXTAGLOOP; loop++) {
+ const TValue *tm;
+ if (ttistable(t)) { /* `t' is a table? */
+ Table *h = hvalue(t);
+ TValue *oldval = luaH_set(L, h, key); /* do a primitive set */
+ if (!ttisnil(oldval) || /* result is no nil? */
+ (tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL) { /* or no TM? */
+ setobj2t(L, oldval, val);
+ luaC_barriert(L, h, val);
+ return;
+ }
+ /* else will try the tag method */
+ }
+ else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX)))
+ luaG_typeerror(L, t, "index");
+ if (ttisfunction(tm)) {
+ callTM(L, tm, t, key, val);
+ return;
+ }
+ t = tm; /* else repeat with `tm' */
+ }
+ luaG_runerror(L, "loop in settable");
+}
+
+
+static int call_binTM (lua_State *L, const TValue *p1, const TValue *p2,
+ StkId res, TMS event) {
+ const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */
+ if (ttisnil(tm))
+ tm = luaT_gettmbyobj(L, p2, event); /* try second operand */
+ if (!ttisfunction(tm)) return 0;
+ callTMres(L, res, tm, p1, p2);
+ return 1;
+}
+
+
+static const TValue *get_compTM (lua_State *L, Table *mt1, Table *mt2,
+ TMS event) {
+ const TValue *tm1 = fasttm(L, mt1, event);
+ const TValue *tm2;
+ if (tm1 == NULL) return NULL; /* no metamethod */
+ if (mt1 == mt2) return tm1; /* same metatables => same metamethods */
+ tm2 = fasttm(L, mt2, event);
+ if (tm2 == NULL) return NULL; /* no metamethod */
+ if (luaO_rawequalObj(tm1, tm2)) /* same metamethods? */
+ return tm1;
+ return NULL;
+}
+
+
+static int call_orderTM (lua_State *L, const TValue *p1, const TValue *p2,
+ TMS event) {
+ const TValue *tm1 = luaT_gettmbyobj(L, p1, event);
+ const TValue *tm2;
+ if (ttisnil(tm1)) return -1; /* no metamethod? */
+ tm2 = luaT_gettmbyobj(L, p2, event);
+ if (!luaO_rawequalObj(tm1, tm2)) /* different metamethods? */
+ return -1;
+ callTMres(L, L->top, tm1, p1, p2);
+ return !l_isfalse(L->top);
+}
+
+
+static int l_strcmp (const TString *ls, const TString *rs) {
+ const char *l = getstr(ls);
+ size_t ll = ls->tsv.len;
+ const char *r = getstr(rs);
+ size_t lr = rs->tsv.len;
+ for (;;) {
+ int temp = strcoll(l, r);
+ if (temp != 0) return temp;
+ else { /* strings are equal up to a `\0' */
+ size_t len = strlen(l); /* index of first `\0' in both strings */
+ if (len == lr) /* r is finished? */
+ return (len == ll) ? 0 : 1;
+ else if (len == ll) /* l is finished? */
+ return -1; /* l is smaller than r (because r is not finished) */
+ /* both strings longer than `len'; go on comparing (after the `\0') */
+ len++;
+ l += len; ll -= len; r += len; lr -= len;
+ }
+ }
+}
+
+
+int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) {
+ int res;
+ if (ttype(l) != ttype(r))
+ return luaG_ordererror(L, l, r);
+ else if (ttisnumber(l))
+ return luai_numlt(nvalue(l), nvalue(r));
+ else if (ttisstring(l))
+ return l_strcmp(rawtsvalue(l), rawtsvalue(r)) < 0;
+ else if ((res = call_orderTM(L, l, r, TM_LT)) != -1)
+ return res;
+ return luaG_ordererror(L, l, r);
+}
+
+
+static int lessequal (lua_State *L, const TValue *l, const TValue *r) {
+ int res;
+ if (ttype(l) != ttype(r))
+ return luaG_ordererror(L, l, r);
+ else if (ttisnumber(l))
+ return luai_numle(nvalue(l), nvalue(r));
+ else if (ttisstring(l))
+ return l_strcmp(rawtsvalue(l), rawtsvalue(r)) <= 0;
+ else if ((res = call_orderTM(L, l, r, TM_LE)) != -1) /* first try `le' */
+ return res;
+ else if ((res = call_orderTM(L, r, l, TM_LT)) != -1) /* else try `lt' */
+ return !res;
+ return luaG_ordererror(L, l, r);
+}
+
+
+int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2) {
+ const TValue *tm;
+ lua_assert(ttype(t1) == ttype(t2));
+ switch (ttype(t1)) {
+ case LUA_TNIL: return 1;
+ case LUA_TNUMBER: return luai_numeq(nvalue(t1), nvalue(t2));
+ case LUA_TBOOLEAN: return bvalue(t1) == bvalue(t2); /* true must be 1 !! */
+ case LUA_TLIGHTUSERDATA: return pvalue(t1) == pvalue(t2);
+ case LUA_TUSERDATA: {
+ if (uvalue(t1) == uvalue(t2)) return 1;
+ tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable,
+ TM_EQ);
+ break; /* will try TM */
+ }
+ case LUA_TTABLE: {
+ if (hvalue(t1) == hvalue(t2)) return 1;
+ tm = get_compTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ);
+ break; /* will try TM */
+ }
+ default: return gcvalue(t1) == gcvalue(t2);
+ }
+ if (tm == NULL) return 0; /* no TM? */
+ callTMres(L, L->top, tm, t1, t2); /* call TM */
+ return !l_isfalse(L->top);
+}
+
+
+void luaV_concat (lua_State *L, int total, int last) {
+ do {
+ StkId top = L->base + last + 1;
+ int n = 2; /* number of elements handled in this pass (at least 2) */
+ if (!tostring(L, top-2) || !tostring(L, top-1)) {
+ if (!call_binTM(L, top-2, top-1, top-2, TM_CONCAT))
+ luaG_concaterror(L, top-2, top-1);
+ } else if (tsvalue(top-1)->len > 0) { /* if len=0, do nothing */
+ /* at least two string values; get as many as possible */
+ size_t tl = tsvalue(top-1)->len;
+ char *buffer;
+ int i;
+ /* collect total length */
+ for (n = 1; n < total && tostring(L, top-n-1); n++) {
+ size_t l = tsvalue(top-n-1)->len;
+ if (l >= MAX_SIZET - tl) luaG_runerror(L, "string length overflow");
+ tl += l;
+ }
+ buffer = luaZ_openspace(L, &G(L)->buff, tl);
+ tl = 0;
+ for (i=n; i>0; i--) { /* concat all strings */
+ size_t l = tsvalue(top-i)->len;
+ memcpy(buffer+tl, svalue(top-i), l);
+ tl += l;
+ }
+ setsvalue2s(L, top-n, luaS_newlstr(L, buffer, tl));
+ }
+ total -= n-1; /* got `n' strings to create 1 new */
+ last -= n-1;
+ } while (total > 1); /* repeat until only 1 result left */
+}
+
+
+static void Arith (lua_State *L, StkId ra, const TValue *rb,
+ const TValue *rc, TMS op) {
+ TValue tempb, tempc;
+ const TValue *b, *c;
+ if ((b = luaV_tonumber(rb, &tempb)) != NULL &&
+ (c = luaV_tonumber(rc, &tempc)) != NULL) {
+ lua_Number nb = nvalue(b), nc = nvalue(c);
+ switch (op) {
+ case TM_ADD: setnvalue(ra, luai_numadd(nb, nc)); break;
+ case TM_SUB: setnvalue(ra, luai_numsub(nb, nc)); break;
+ case TM_MUL: setnvalue(ra, luai_nummul(nb, nc)); break;
+ case TM_DIV: setnvalue(ra, luai_numdiv(nb, nc)); break;
+ case TM_MOD: setnvalue(ra, luai_nummod(nb, nc)); break;
+ case TM_POW: setnvalue(ra, luai_numpow(nb, nc)); break;
+ case TM_UNM: setnvalue(ra, luai_numunm(nb)); break;
+ default: lua_assert(0); break;
+ }
+ }
+ else if (!call_binTM(L, rb, rc, ra, op))
+ luaG_aritherror(L, rb, rc);
+}
+
+
+
+/*
+** some macros for common tasks in `luaV_execute'
+*/
+
+#define runtime_check(L, c) { if (!(c)) break; }
+
+#define RA(i) (base+GETARG_A(i))
+/* to be used after possible stack reallocation */
+#define RB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgR, base+GETARG_B(i))
+#define RC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgR, base+GETARG_C(i))
+#define RKB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, \
+ ISK(GETARG_B(i)) ? k+INDEXK(GETARG_B(i)) : base+GETARG_B(i))
+#define RKC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgK, \
+ ISK(GETARG_C(i)) ? k+INDEXK(GETARG_C(i)) : base+GETARG_C(i))
+#define KBx(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, k+GETARG_Bx(i))
+
+
+#define dojump(L,pc,i) {(pc) += (i); luai_threadyield(L);}
+
+
+#define Protect(x) { L->savedpc = pc; {x;}; base = L->base; }
+
+
+#define arith_op(op,tm) { \
+ TValue *rb = RKB(i); \
+ TValue *rc = RKC(i); \
+ if (ttisnumber(rb) && ttisnumber(rc)) { \
+ lua_Number nb = nvalue(rb), nc = nvalue(rc); \
+ setnvalue(ra, op(nb, nc)); \
+ } \
+ else \
+ Protect(Arith(L, ra, rb, rc, tm)); \
+ }
+
+
+
+void luaV_execute (lua_State *L, int nexeccalls) {
+ LClosure *cl;
+ StkId base;
+ TValue *k;
+ const Instruction *pc;
+ reentry: /* entry point */
+ pc = L->savedpc;
+ cl = &clvalue(L->ci->func)->l;
+ base = L->base;
+ k = cl->p->k;
+ /* main loop of interpreter */
+ for (;;) {
+ const Instruction i = *pc++;
+ StkId ra;
+ if ((L->hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)) &&
+ (--L->hookcount == 0 || L->hookmask & LUA_MASKLINE)) {
+ traceexec(L, pc);
+ if (L->status == LUA_YIELD) { /* did hook yield? */
+ L->savedpc = pc - 1;
+ return;
+ }
+ base = L->base;
+ }
+ /* warning!! several calls may realloc the stack and invalidate `ra' */
+ ra = RA(i);
+ lua_assert(base == L->base && L->base == L->ci->base);
+ lua_assert(base <= L->top && L->top <= L->stack + L->stacksize);
+ lua_assert(L->top == L->ci->top || luaG_checkopenop(i));
+ switch (GET_OPCODE(i)) {
+ case OP_MOVE: {
+ setobjs2s(L, ra, RB(i));
+ continue;
+ }
+ case OP_LOADK: {
+ setobj2s(L, ra, KBx(i));
+ continue;
+ }
+ case OP_LOADBOOL: {
+ setbvalue(ra, GETARG_B(i));
+ if (GETARG_C(i)) pc++; /* skip next instruction (if C) */
+ continue;
+ }
+ case OP_LOADNIL: {
+ TValue *rb = RB(i);
+ do {
+ setnilvalue(rb--);
+ } while (rb >= ra);
+ continue;
+ }
+ case OP_GETUPVAL: {
+ int b = GETARG_B(i);
+ setobj2s(L, ra, cl->upvals[b]->v);
+ continue;
+ }
+ case OP_GETGLOBAL: {
+ TValue g;
+ TValue *rb = KBx(i);
+ sethvalue(L, &g, cl->env);
+ lua_assert(ttisstring(rb));
+ Protect(luaV_gettable(L, &g, rb, ra));
+ continue;
+ }
+ case OP_GETTABLE: {
+ Protect(luaV_gettable(L, RB(i), RKC(i), ra));
+ continue;
+ }
+ case OP_SETGLOBAL: {
+ TValue g;
+ sethvalue(L, &g, cl->env);
+ lua_assert(ttisstring(KBx(i)));
+ Protect(luaV_settable(L, &g, KBx(i), ra));
+ continue;
+ }
+ case OP_SETUPVAL: {
+ UpVal *uv = cl->upvals[GETARG_B(i)];
+ setobj(L, uv->v, ra);
+ luaC_barrier(L, uv, ra);
+ continue;
+ }
+ case OP_SETTABLE: {
+ Protect(luaV_settable(L, ra, RKB(i), RKC(i)));
+ continue;
+ }
+ case OP_NEWTABLE: {
+ int b = GETARG_B(i);
+ int c = GETARG_C(i);
+ sethvalue(L, ra, luaH_new(L, luaO_fb2int(b), luaO_fb2int(c)));
+ Protect(luaC_checkGC(L));
+ continue;
+ }
+ case OP_SELF: {
+ StkId rb = RB(i);
+ setobjs2s(L, ra+1, rb);
+ Protect(luaV_gettable(L, rb, RKC(i), ra));
+ continue;
+ }
+ case OP_ADD: {
+ arith_op(luai_numadd, TM_ADD);
+ continue;
+ }
+ case OP_SUB: {
+ arith_op(luai_numsub, TM_SUB);
+ continue;
+ }
+ case OP_MUL: {
+ arith_op(luai_nummul, TM_MUL);
+ continue;
+ }
+ case OP_DIV: {
+ arith_op(luai_numdiv, TM_DIV);
+ continue;
+ }
+ case OP_MOD: {
+ arith_op(luai_nummod, TM_MOD);
+ continue;
+ }
+ case OP_POW: {
+ arith_op(luai_numpow, TM_POW);
+ continue;
+ }
+ case OP_UNM: {
+ TValue *rb = RB(i);
+ if (ttisnumber(rb)) {
+ lua_Number nb = nvalue(rb);
+ setnvalue(ra, luai_numunm(nb));
+ }
+ else {
+ Protect(Arith(L, ra, rb, rb, TM_UNM));
+ }
+ continue;
+ }
+ case OP_NOT: {
+ int res = l_isfalse(RB(i)); /* next assignment may change this value */
+ setbvalue(ra, res);
+ continue;
+ }
+ case OP_LEN: {
+ const TValue *rb = RB(i);
+ switch (ttype(rb)) {
+ case LUA_TTABLE: {
+ setnvalue(ra, cast_num(luaH_getn(hvalue(rb))));
+ break;
+ }
+ case LUA_TSTRING: {
+ setnvalue(ra, cast_num(tsvalue(rb)->len));
+ break;
+ }
+ default: { /* try metamethod */
+ Protect(
+ if (!call_binTM(L, rb, luaO_nilobject, ra, TM_LEN))
+ luaG_typeerror(L, rb, "get length of");
+ )
+ }
+ }
+ continue;
+ }
+ case OP_CONCAT: {
+ int b = GETARG_B(i);
+ int c = GETARG_C(i);
+ Protect(luaV_concat(L, c-b+1, c); luaC_checkGC(L));
+ setobjs2s(L, RA(i), base+b);
+ continue;
+ }
+ case OP_JMP: {
+ dojump(L, pc, GETARG_sBx(i));
+ continue;
+ }
+ case OP_EQ: {
+ TValue *rb = RKB(i);
+ TValue *rc = RKC(i);
+ Protect(
+ if (equalobj(L, rb, rc) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_LT: {
+ Protect(
+ if (luaV_lessthan(L, RKB(i), RKC(i)) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_LE: {
+ Protect(
+ if (lessequal(L, RKB(i), RKC(i)) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_TEST: {
+ if (l_isfalse(ra) != GETARG_C(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ pc++;
+ continue;
+ }
+ case OP_TESTSET: {
+ TValue *rb = RB(i);
+ if (l_isfalse(rb) != GETARG_C(i)) {
+ setobjs2s(L, ra, rb);
+ dojump(L, pc, GETARG_sBx(*pc));
+ }
+ pc++;
+ continue;
+ }
+ case OP_CALL: {
+ int b = GETARG_B(i);
+ int nresults = GETARG_C(i) - 1;
+ if (b != 0) L->top = ra+b; /* else previous instruction set top */
+ L->savedpc = pc;
+ switch (luaD_precall(L, ra, nresults)) {
+ case PCRLUA: {
+ nexeccalls++;
+ goto reentry; /* restart luaV_execute over new Lua function */
+ }
+ case PCRC: {
+ /* it was a C function (`precall' called it); adjust results */
+ if (nresults >= 0) L->top = L->ci->top;
+ base = L->base;
+ continue;
+ }
+ default: {
+ return; /* yield */
+ }
+ }
+ }
+ case OP_TAILCALL: {
+ int b = GETARG_B(i);
+ if (b != 0) L->top = ra+b; /* else previous instruction set top */
+ L->savedpc = pc;
+ lua_assert(GETARG_C(i) - 1 == LUA_MULTRET);
+ switch (luaD_precall(L, ra, LUA_MULTRET)) {
+ case PCRLUA: {
+ /* tail call: put new frame in place of previous one */
+ CallInfo *ci = L->ci - 1; /* previous frame */
+ int aux;
+ StkId func = ci->func;
+ StkId pfunc = (ci+1)->func; /* previous function index */
+ if (L->openupval) luaF_close(L, ci->base);
+ L->base = ci->base = ci->func + ((ci+1)->base - pfunc);
+ for (aux = 0; pfunc+aux < L->top; aux++) /* move frame down */
+ setobjs2s(L, func+aux, pfunc+aux);
+ ci->top = L->top = func+aux; /* correct top */
+ lua_assert(L->top == L->base + clvalue(func)->l.p->maxstacksize);
+ ci->savedpc = L->savedpc;
+ ci->tailcalls++; /* one more call lost */
+ L->ci--; /* remove new frame */
+ goto reentry;
+ }
+ case PCRC: { /* it was a C function (`precall' called it) */
+ base = L->base;
+ continue;
+ }
+ default: {
+ return; /* yield */
+ }
+ }
+ }
+ case OP_RETURN: {
+ int b = GETARG_B(i);
+ if (b != 0) L->top = ra+b-1;
+ if (L->openupval) luaF_close(L, base);
+ L->savedpc = pc;
+ b = luaD_poscall(L, ra);
+ if (--nexeccalls == 0) /* was previous function running `here'? */
+ return; /* no: return */
+ else { /* yes: continue its execution */
+ if (b) L->top = L->ci->top;
+ lua_assert(isLua(L->ci));
+ lua_assert(GET_OPCODE(*((L->ci)->savedpc - 1)) == OP_CALL);
+ goto reentry;
+ }
+ }
+ case OP_FORLOOP: {
+ lua_Number step = nvalue(ra+2);
+ lua_Number idx = luai_numadd(nvalue(ra), step); /* increment index */
+ lua_Number limit = nvalue(ra+1);
+ if (luai_numlt(0, step) ? luai_numle(idx, limit)
+ : luai_numle(limit, idx)) {
+ dojump(L, pc, GETARG_sBx(i)); /* jump back */
+ setnvalue(ra, idx); /* update internal index... */
+ setnvalue(ra+3, idx); /* ...and external index */
+ }
+ continue;
+ }
+ case OP_FORPREP: {
+ const TValue *init = ra;
+ const TValue *plimit = ra+1;
+ const TValue *pstep = ra+2;
+ L->savedpc = pc; /* next steps may throw errors */
+ if (!tonumber(init, ra))
+ luaG_runerror(L, LUA_QL("for") " initial value must be a number");
+ else if (!tonumber(plimit, ra+1))
+ luaG_runerror(L, LUA_QL("for") " limit must be a number");
+ else if (!tonumber(pstep, ra+2))
+ luaG_runerror(L, LUA_QL("for") " step must be a number");
+ setnvalue(ra, luai_numsub(nvalue(ra), nvalue(pstep)));
+ dojump(L, pc, GETARG_sBx(i));
+ continue;
+ }
+ case OP_TFORLOOP: {
+ StkId cb = ra + 3; /* call base */
+ setobjs2s(L, cb+2, ra+2);
+ setobjs2s(L, cb+1, ra+1);
+ setobjs2s(L, cb, ra);
+ L->top = cb+3; /* func. + 2 args (state and index) */
+ Protect(luaD_call(L, cb, GETARG_C(i)));
+ L->top = L->ci->top;
+ cb = RA(i) + 3; /* previous call may change the stack */
+ if (!ttisnil(cb)) { /* continue loop? */
+ setobjs2s(L, cb-1, cb); /* save control variable */
+ dojump(L, pc, GETARG_sBx(*pc)); /* jump back */
+ }
+ pc++;
+ continue;
+ }
+ case OP_SETLIST: {
+ int n = GETARG_B(i);
+ int c = GETARG_C(i);
+ int last;
+ Table *h;
+ if (n == 0) {
+ n = cast_int(L->top - ra) - 1;
+ L->top = L->ci->top;
+ }
+ if (c == 0) c = cast_int(*pc++);
+ runtime_check(L, ttistable(ra));
+ h = hvalue(ra);
+ last = ((c-1)*LFIELDS_PER_FLUSH) + n;
+ if (last > h->sizearray) /* needs more space? */
+ luaH_resizearray(L, h, last); /* pre-alloc it at once */
+ for (; n > 0; n--) {
+ TValue *val = ra+n;
+ setobj2t(L, luaH_setnum(L, h, last--), val);
+ luaC_barriert(L, h, val);
+ }
+ continue;
+ }
+ case OP_CLOSE: {
+ luaF_close(L, ra);
+ continue;
+ }
+ case OP_CLOSURE: {
+ Proto *p;
+ Closure *ncl;
+ int nup, j;
+ p = cl->p->p[GETARG_Bx(i)];
+ nup = p->nups;
+ ncl = luaF_newLclosure(L, nup, cl->env);
+ ncl->l.p = p;
+ for (j=0; j<nup; j++, pc++) {
+ if (GET_OPCODE(*pc) == OP_GETUPVAL)
+ ncl->l.upvals[j] = cl->upvals[GETARG_B(*pc)];
+ else {
+ lua_assert(GET_OPCODE(*pc) == OP_MOVE);
+ ncl->l.upvals[j] = luaF_findupval(L, base + GETARG_B(*pc));
+ }
+ }
+ setclvalue(L, ra, ncl);
+ Protect(luaC_checkGC(L));
+ continue;
+ }
+ case OP_VARARG: {
+ int b = GETARG_B(i) - 1;
+ int j;
+ CallInfo *ci = L->ci;
+ int n = cast_int(ci->base - ci->func) - cl->p->numparams - 1;
+ if (b == LUA_MULTRET) {
+ Protect(luaD_checkstack(L, n));
+ ra = RA(i); /* previous call may change the stack */
+ b = n;
+ L->top = ra + n;
+ }
+ for (j = 0; j < b; j++) {
+ if (j < n) {
+ setobjs2s(L, ra + j, ci->base - n + j);
+ }
+ else {
+ setnilvalue(ra + j);
+ }
+ }
+ continue;
+ }
+ }
+ }
+}
+
diff --git a/deps/lua/src/lvm.h b/deps/lua/src/lvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..788423f8e31f8e9bcf30415584faaec768ea767a
--- /dev/null
+++ b/deps/lua/src/lvm.h
@@ -0,0 +1,36 @@
+/*
+** $Id: lvm.h,v 2.5 2005/08/22 18:54:49 roberto Exp $
+** Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lvm_h
+#define lvm_h
+
+
+#include "ldo.h"
+#include "lobject.h"
+#include "ltm.h"
+
+
+#define tostring(L,o) ((ttype(o) == LUA_TSTRING) || (luaV_tostring(L, o)))
+
+#define tonumber(o,n) (ttype(o) == LUA_TNUMBER || \
+ (((o) = luaV_tonumber(o,n)) != NULL))
+
+#define equalobj(L,o1,o2) \
+ (ttype(o1) == ttype(o2) && luaV_equalval(L, o1, o2))
+
+
+LUAI_FUNC int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r);
+LUAI_FUNC int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2);
+LUAI_FUNC const TValue *luaV_tonumber (const TValue *obj, TValue *n);
+LUAI_FUNC int luaV_tostring (lua_State *L, StkId obj);
+LUAI_FUNC void luaV_gettable (lua_State *L, const TValue *t, TValue *key,
+ StkId val);
+LUAI_FUNC void luaV_settable (lua_State *L, const TValue *t, TValue *key,
+ StkId val);
+LUAI_FUNC void luaV_execute (lua_State *L, int nexeccalls);
+LUAI_FUNC void luaV_concat (lua_State *L, int total, int last);
+
+#endif
diff --git a/deps/lua/src/lzio.c b/deps/lua/src/lzio.c
new file mode 100644
index 0000000000000000000000000000000000000000..5121ada8466e75cfe60cf9cde8282c30499892b8
--- /dev/null
+++ b/deps/lua/src/lzio.c
@@ -0,0 +1,82 @@
+/*
+** $Id: lzio.c,v 1.31 2005/06/03 20:15:29 roberto Exp $
+** a generic input stream interface
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lzio_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "llimits.h"
+#include "lmem.h"
+#include "lstate.h"
+#include "lzio.h"
+
+
+int luaZ_fill (ZIO *z) {
+ size_t size;
+ lua_State *L = z->L;
+ const char *buff;
+ lua_unlock(L);
+ buff = z->reader(L, z->data, &size);
+ lua_lock(L);
+ if (buff == NULL || size == 0) return EOZ;
+ z->n = size - 1;
+ z->p = buff;
+ return char2int(*(z->p++));
+}
+
+
+int luaZ_lookahead (ZIO *z) {
+ if (z->n == 0) {
+ if (luaZ_fill(z) == EOZ)
+ return EOZ;
+ else {
+ z->n++; /* luaZ_fill removed first byte; put it back */
+ z->p--;
+ }
+ }
+ return char2int(*z->p);
+}
+
+
+void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, void *data) {
+ z->L = L;
+ z->reader = reader;
+ z->data = data;
+ z->n = 0;
+ z->p = NULL;
+}
+
+
+/* --------------------------------------------------------------- read --- */
+size_t luaZ_read (ZIO *z, void *b, size_t n) {
+ while (n) {
+ size_t m;
+ if (luaZ_lookahead(z) == EOZ)
+ return n; /* return number of missing bytes */
+ m = (n <= z->n) ? n : z->n; /* min. between n and z->n */
+ memcpy(b, z->p, m);
+ z->n -= m;
+ z->p += m;
+ b = (char *)b + m;
+ n -= m;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n) {
+ if (n > buff->buffsize) {
+ if (n < LUA_MINBUFFER) n = LUA_MINBUFFER;
+ luaZ_resizebuffer(L, buff, n);
+ }
+ return buff->buffer;
+}
+
+
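
luaZ_fill() above pulls data on demand by calling the lua_Reader callback stored in the ZIO; the same reader type is what luaZ_init() and the public lua_load() accept. Below is a minimal sketch of such a reader over an in-memory buffer; the struct and function names (StringSource, string_reader) are illustrative, not part of the Lua API.

```c
/* Sketch of a minimal lua_Reader over an in-memory buffer.
** lua_Reader: const char *(*)(lua_State *L, void *ud, size_t *sz)
** Returning NULL or *sz == 0 signals end of stream to luaZ_fill(). */
#include <stddef.h>
#include "lua.h"

typedef struct StringSource {
  const char *data;   /* chunk text */
  size_t size;        /* bytes not yet handed out */
} StringSource;

static const char *string_reader(lua_State *L, void *ud, size_t *sz) {
  StringSource *s = (StringSource *)ud;
  (void)L;                  /* this reader does not need the state */
  if (s->size == 0) {       /* nothing left: end of stream */
    *sz = 0;
    return NULL;
  }
  *sz = s->size;            /* hand out everything in one block */
  s->size = 0;
  return s->data;
}
```

A reader may return its data in as many blocks as it likes; luaZ_read() and zgetc() buffer and refill transparently, so the lexer and undumper never see block boundaries.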
diff --git a/deps/lua/src/lzio.h b/deps/lua/src/lzio.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f403b8e74f5e4562328ebfa76f9c0ef4c77266d
--- /dev/null
+++ b/deps/lua/src/lzio.h
@@ -0,0 +1,67 @@
+/*
+** $Id: lzio.h,v 1.21 2005/05/17 19:49:15 roberto Exp $
+** Buffered streams
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lzio_h
+#define lzio_h
+
+#include "lua.h"
+
+#include "lmem.h"
+
+
+#define EOZ (-1) /* end of stream */
+
+typedef struct Zio ZIO;
+
+#define char2int(c) cast(int, cast(unsigned char, (c)))
+
+#define zgetc(z) (((z)->n--)>0 ? char2int(*(z)->p++) : luaZ_fill(z))
+
+typedef struct Mbuffer {
+ char *buffer;
+ size_t n;
+ size_t buffsize;
+} Mbuffer;
+
+#define luaZ_initbuffer(L, buff) ((buff)->buffer = NULL, (buff)->buffsize = 0)
+
+#define luaZ_buffer(buff) ((buff)->buffer)
+#define luaZ_sizebuffer(buff) ((buff)->buffsize)
+#define luaZ_bufflen(buff) ((buff)->n)
+
+#define luaZ_resetbuffer(buff) ((buff)->n = 0)
+
+
+#define luaZ_resizebuffer(L, buff, size) \
+ (luaM_reallocvector(L, (buff)->buffer, (buff)->buffsize, size, char), \
+ (buff)->buffsize = size)
+
+#define luaZ_freebuffer(L, buff) luaZ_resizebuffer(L, buff, 0)
+
+
+LUAI_FUNC char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n);
+LUAI_FUNC void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader,
+ void *data);
+LUAI_FUNC size_t luaZ_read (ZIO* z, void* b, size_t n); /* read next n bytes */
+LUAI_FUNC int luaZ_lookahead (ZIO *z);
+
+
+
+/* --------- Private Part ------------------ */
+
+struct Zio {
+ size_t n; /* bytes still unread */
+ const char *p; /* current position in buffer */
+ lua_Reader reader;
+ void* data; /* additional data */
+ lua_State *L; /* Lua state (for reader) */
+};
+
+
+LUAI_FUNC int luaZ_fill (ZIO *z);
+
+#endif
diff --git a/deps/lua/src/print.c b/deps/lua/src/print.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c3a4457c498c68bf58b3f790aaae76610cafa8c
--- /dev/null
+++ b/deps/lua/src/print.c
@@ -0,0 +1,224 @@
+/*
+** $Id: print.c,v 1.54 2006/01/11 22:49:27 lhf Exp $
+** print bytecodes
+** See Copyright Notice in lua.h
+*/
+
+#include <ctype.h>
+#include <stdio.h>
+
+#define luac_c
+#define LUA_CORE
+
+#include "ldebug.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lundump.h"
+
+#define PrintFunction luaU_print
+
+#define Sizeof(x) ((int)sizeof(x))
+#define VOID(p) ((const void*)(p))
+
+static void PrintString(const Proto* f, int n)
+{
+ const char* s=svalue(&f->k[n]);
+ putchar('"');
+ for (; *s; s++)
+ {
+ switch (*s)
+ {
+ case '"': printf("\\\""); break;
+ case '\a': printf("\\a"); break;
+ case '\b': printf("\\b"); break;
+ case '\f': printf("\\f"); break;
+ case '\n': printf("\\n"); break;
+ case '\r': printf("\\r"); break;
+ case '\t': printf("\\t"); break;
+ case '\v': printf("\\v"); break;
+ default: if (isprint((unsigned char)*s))
+ printf("%c",*s);
+ else
+ printf("\\%03u",(unsigned char)*s);
+ }
+ }
+ putchar('"');
+}
+
+static void PrintConstant(const Proto* f, int i)
+{
+ const TValue* o=&f->k[i];
+ switch (ttype(o))
+ {
+ case LUA_TNIL:
+ printf("nil");
+ break;
+ case LUA_TBOOLEAN:
+ printf(bvalue(o) ? "true" : "false");
+ break;
+ case LUA_TNUMBER:
+ printf(LUA_NUMBER_FMT,nvalue(o));
+ break;
+ case LUA_TSTRING:
+ PrintString(f,i);
+ break;
+ default: /* cannot happen */
+ printf("? type=%d",ttype(o));
+ break;
+ }
+}
+
+static void PrintCode(const Proto* f)
+{
+ const Instruction* code=f->code;
+ int pc,n=f->sizecode;
+ for (pc=0; pc<n; pc++)
+ {
+ Instruction i=code[pc];
+ OpCode o=GET_OPCODE(i);
+ int a=GETARG_A(i);
+ int b=GETARG_B(i);
+ int c=GETARG_C(i);
+ int bx=GETARG_Bx(i);
+ int sbx=GETARG_sBx(i);
+ int line=getline(f,pc);
+ printf("\t%d\t",pc+1);
+ if (line>0) printf("[%d]\t",line); else printf("[-]\t");
+ printf("%-9s\t",luaP_opnames[o]);
+ switch (getOpMode(o))
+ {
+ case iABC:
+ printf("%d",a);
+ if (getBMode(o)!=OpArgN) printf(" %d",ISK(b) ? (-1-INDEXK(b)) : b);
+ if (getCMode(o)!=OpArgN) printf(" %d",ISK(c) ? (-1-INDEXK(c)) : c);
+ break;
+ case iABx:
+ if (getBMode(o)==OpArgK) printf("%d %d",a,-1-bx); else printf("%d %d",a,bx);
+ break;
+ case iAsBx:
+ if (o==OP_JMP) printf("%d",sbx); else printf("%d %d",a,sbx);
+ break;
+ }
+ switch (o)
+ {
+ case OP_LOADK:
+ printf("\t; "); PrintConstant(f,bx);
+ break;
+ case OP_GETUPVAL:
+ case OP_SETUPVAL:
+ printf("\t; %s", (f->sizeupvalues>0) ? getstr(f->upvalues[b]) : "-");
+ break;
+ case OP_GETGLOBAL:
+ case OP_SETGLOBAL:
+ printf("\t; %s",svalue(&f->k[bx]));
+ break;
+ case OP_GETTABLE:
+ case OP_SELF:
+ if (ISK(c)) { printf("\t; "); PrintConstant(f,INDEXK(c)); }
+ break;
+ case OP_SETTABLE:
+ case OP_ADD:
+ case OP_SUB:
+ case OP_MUL:
+ case OP_DIV:
+ case OP_POW:
+ case OP_EQ:
+ case OP_LT:
+ case OP_LE:
+ if (ISK(b) || ISK(c))
+ {
+ printf("\t; ");
+ if (ISK(b)) PrintConstant(f,INDEXK(b)); else printf("-");
+ printf(" ");
+ if (ISK(c)) PrintConstant(f,INDEXK(c)); else printf("-");
+ }
+ break;
+ case OP_JMP:
+ case OP_FORLOOP:
+ case OP_FORPREP:
+ printf("\t; to %d",sbx+pc+2);
+ break;
+ case OP_CLOSURE:
+ printf("\t; %p",VOID(f->p[bx]));
+ break;
+ case OP_SETLIST:
+ if (c==0) printf("\t; %d",(int)code[++pc]);
+ else printf("\t; %d",c);
+ break;
+ default:
+ break;
+ }
+ printf("\n");
+ }
+}
+
+#define SS(x) (x==1)?"":"s"
+#define S(x) x,SS(x)
+
+static void PrintHeader(const Proto* f)
+{
+ const char* s=getstr(f->source);
+ if (*s=='@' || *s=='=')
+ s++;
+ else if (*s==LUA_SIGNATURE[0])
+ s="(bstring)";
+ else
+ s="(string)";
+ printf("\n%s <%s:%d,%d> (%d instruction%s, %d bytes at %p)\n",
+ (f->linedefined==0)?"main":"function",s,
+ f->linedefined,f->lastlinedefined,
+ S(f->sizecode),f->sizecode*Sizeof(Instruction),VOID(f));
+ printf("%d%s param%s, %d slot%s, %d upvalue%s, ",
+ f->numparams,f->is_vararg?"+":"",SS(f->numparams),
+ S(f->maxstacksize),S(f->nups));
+ printf("%d local%s, %d constant%s, %d function%s\n",
+ S(f->sizelocvars),S(f->sizek),S(f->sizep));
+}
+
+static void PrintConstants(const Proto* f)
+{
+ int i,n=f->sizek;
+ printf("constants (%d) for %p:\n",n,VOID(f));
+ for (i=0; i<n; i++)
+ {
+ printf("\t%d\t",i+1);
+ PrintConstant(f,i);
+ printf("\n");
+ }
+}
+
+static void PrintLocals(const Proto* f)
+{
+ int i,n=f->sizelocvars;
+ printf("locals (%d) for %p:\n",n,VOID(f));
+ for (i=0; i<n; i++)
+ {
+ printf("\t%d\t%s\t%d\t%d\n",
+ i,getstr(f->locvars[i].varname),f->locvars[i].startpc+1,f->locvars[i].endpc+1);
+ }
+}
+
+static void PrintUpvalues(const Proto* f)
+{
+ int i,n=f->sizeupvalues;
+ printf("upvalues (%d) for %p:\n",n,VOID(f));
+ if (f->upvalues==NULL) return;
+ for (i=0; i<n; i++)
+ {
+ printf("\t%d\t%s\n",i,getstr(f->upvalues[i]));
+ }
+}
+
+void PrintFunction(const Proto* f, int full)
+{
+ int i,n=f->sizep;
+ PrintHeader(f);
+ PrintCode(f);
+ if (full)
+ {
+ PrintConstants(f);
+ PrintLocals(f);
+ PrintUpvalues(f);
+ }
+ for (i=0; i<n; i++) PrintFunction(f->p[i],full);
+}
diff --git a/deps/lua/test/README b/deps/lua/test/README
new file mode 100644
index 0000000000000000000000000000000000000000..0c7f38bc25bdf6d1d336e60c12abed960dc961e8
--- /dev/null
+++ b/deps/lua/test/README
@@ -0,0 +1,26 @@
+These are simple tests for Lua. Some of them contain useful code.
+They are meant to be run to make sure Lua is built correctly and also
+to be read, to see how Lua programs look.
+
+Here is a one-line summary of each program:
+
+ bisect.lua bisection method for solving non-linear equations
+ cf.lua temperature conversion table (celsius to fahrenheit)
+ echo.lua echo command line arguments
+ env.lua environment variables as automatic global variables
+ factorial.lua factorial without recursion
+ fib.lua fibonacci function with cache
+ fibfor.lua fibonacci numbers with coroutines and generators
+ globals.lua report global variable usage
+ hello.lua the first program in every language
+ life.lua Conway's Game of Life
+ luac.lua bare-bones luac
+ printf.lua an implementation of printf
+ readonly.lua make global variables readonly
+ sieve.lua the sieve of Eratosthenes programmed with coroutines
+ sort.lua two implementations of a sort function
+ table.lua make table, grouping all data for the same item
+ trace-calls.lua trace calls
+ trace-globals.lua trace assignments to global variables
+ xd.lua hex dump
+
diff --git a/deps/lua/test/bisect.lua b/deps/lua/test/bisect.lua
new file mode 100644
index 0000000000000000000000000000000000000000..f91e69bfbaf6710cc4ec99fee38aa37631c964de
--- /dev/null
+++ b/deps/lua/test/bisect.lua
@@ -0,0 +1,27 @@
+-- bisection method for solving non-linear equations
+
+delta=1e-6 -- tolerance
+
+function bisect(f,a,b,fa,fb)
+ local c=(a+b)/2
+ io.write(n," c=",c," a=",a," b=",b,"\n")
+ if c==a or c==b or math.abs(a-b) posted to lua-l
+-- modified to use ANSI terminal escape sequences
+-- modified to use for instead of while
+
+local write=io.write
+
+ALIVE="" DEAD=""
+ALIVE="O" DEAD="-"
+
+function delay() -- NOTE: SYSTEM-DEPENDENT, adjust as necessary
+ for i=1,10000 do end
+ -- local i=os.clock()+1 while(os.clock() 0 do
+ local xm1,x,xp1,xi=self.w-1,self.w,1,self.w
+ while xi > 0 do
+ local sum = self[ym1][xm1] + self[ym1][x] + self[ym1][xp1] +
+ self[y][xm1] + self[y][xp1] +
+ self[yp1][xm1] + self[yp1][x] + self[yp1][xp1]
+ next[y][x] = ((sum==2) and self[y][x]) or ((sum==3) and 1) or 0
+ xm1,x,xp1,xi = x,xp1,xp1+1,xi-1
+ end
+ ym1,y,yp1,yi = y,yp1,yp1+1,yi-1
+ end
+end
+
+-- output the array to screen
+function _CELLS:draw()
+ local out="" -- accumulate to reduce flicker
+ for y=1,self.h do
+ for x=1,self.w do
+ out=out..(((self[y][x]>0) and ALIVE) or DEAD)
+ end
+ out=out.."\n"
+ end
+ write(out)
+end
+
+-- constructor
+function CELLS(w,h)
+ local c = ARRAY2D(w,h)
+ c.spawn = _CELLS.spawn
+ c.evolve = _CELLS.evolve
+ c.draw = _CELLS.draw
+ return c
+end
+
+--
+-- shapes suitable for use with spawn() above
+--
+HEART = { 1,0,1,1,0,1,1,1,1; w=3,h=3 }
+GLIDER = { 0,0,1,1,0,1,0,1,1; w=3,h=3 }
+EXPLODE = { 0,1,0,1,1,1,1,0,1,0,1,0; w=3,h=4 }
+FISH = { 0,1,1,1,1,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0; w=5,h=4 }
+BUTTERFLY = { 1,0,0,0,1,0,1,1,1,0,1,0,0,0,1,1,0,1,0,1,1,0,0,0,1; w=5,h=5 }
+
+-- the main routine
+function LIFE(w,h)
+ -- create two arrays
+ local thisgen = CELLS(w,h)
+ local nextgen = CELLS(w,h)
+
+ -- create some life
+ -- about 1000 generations of fun, then a glider steady-state
+ thisgen:spawn(GLIDER,5,4)
+ thisgen:spawn(EXPLODE,25,10)
+ thisgen:spawn(FISH,4,12)
+
+ -- run until break
+ local gen=1
+ write("\027[2J") -- ANSI clear screen
+ while 1 do
+ thisgen:evolve(nextgen)
+ thisgen,nextgen = nextgen,thisgen
+ write("\027[H") -- ANSI home cursor
+ thisgen:draw()
+ write("Life - generation ",gen,"\n")
+ gen=gen+1
+ if gen>2000 then break end
+ --delay() -- no delay
+ end
+end
+
+LIFE(40,20)
diff --git a/deps/lua/test/luac.lua b/deps/lua/test/luac.lua
new file mode 100644
index 0000000000000000000000000000000000000000..96a0a97ce7aa2704c9b8b409bcc14f1a80c746ca
--- /dev/null
+++ b/deps/lua/test/luac.lua
@@ -0,0 +1,7 @@
+-- bare-bones luac in Lua
+-- usage: lua luac.lua file.lua
+
+assert(arg[1]~=nil and arg[2]==nil,"usage: lua luac.lua file.lua")
+f=assert(io.open("luac.out","wb"))
+assert(f:write(string.dump(assert(loadfile(arg[1])))))
+assert(f:close())
diff --git a/deps/lua/test/printf.lua b/deps/lua/test/printf.lua
new file mode 100644
index 0000000000000000000000000000000000000000..58c63ff5184e4f4274f9e0408a2959526c365ac0
--- /dev/null
+++ b/deps/lua/test/printf.lua
@@ -0,0 +1,7 @@
+-- an implementation of printf
+
+function printf(...)
+ io.write(string.format(...))
+end
+
+printf("Hello %s from %s on %s\n",os.getenv"USER" or "there",_VERSION,os.date())
diff --git a/deps/lua/test/readonly.lua b/deps/lua/test/readonly.lua
new file mode 100644
index 0000000000000000000000000000000000000000..85c0b4e01324d0cf5f87495d44a6da26ad4cce96
--- /dev/null
+++ b/deps/lua/test/readonly.lua
@@ -0,0 +1,12 @@
+-- make global variables readonly
+
+local f=function (t,i) error("cannot redefine global variable `"..i.."'",2) end
+local g={}
+local G=getfenv()
+setmetatable(g,{__index=G,__newindex=f})
+setfenv(1,g)
+
+-- an example
+rawset(g,"x",3)
+x=2
+y=1 -- cannot redefine `y'
diff --git a/deps/lua/test/sieve.lua b/deps/lua/test/sieve.lua
new file mode 100644
index 0000000000000000000000000000000000000000..0871bb212592726d5cca2c9478e9fcaf12c8ff09
--- /dev/null
+++ b/deps/lua/test/sieve.lua
@@ -0,0 +1,29 @@
+-- the sieve of Eratosthenes programmed with coroutines
+-- typical usage: lua -e N=1000 sieve.lua | column
+
+-- generate all the numbers from 2 to n
+function gen (n)
+ return coroutine.wrap(function ()
+ for i=2,n do coroutine.yield(i) end
+ end)
+end
+
+-- filter the numbers generated by `g', removing multiples of `p'
+function filter (p, g)
+ return coroutine.wrap(function ()
+ while 1 do
+ local n = g()
+ if n == nil then return end
+ if math.mod(n, p) ~= 0 then coroutine.yield(n) end
+ end
+ end)
+end
+
+N=N or 1000 -- from command line
+x = gen(N) -- generate primes up to N
+while 1 do
+ local n = x() -- pick a number until done
+ if n == nil then break end
+ print(n) -- must be a prime number
+ x = filter(n, x) -- now remove its multiples
+end
diff --git a/deps/lua/test/sort.lua b/deps/lua/test/sort.lua
new file mode 100644
index 0000000000000000000000000000000000000000..0bcb15f837a7acd123b5426b3ecd90badbe5a6e1
--- /dev/null
+++ b/deps/lua/test/sort.lua
@@ -0,0 +1,66 @@
+-- two implementations of a sort function
+-- this is an example only. Lua has now a built-in function "sort"
+
+-- extracted from Programming Pearls, page 110
+function qsort(x,l,u,f)
+ if ly end)
+ show("after reverse selection sort",x)
+ qsort(x,1,n,function (x,y) return x>> ",string.rep(" ",level))
+ if t~=nil and t.currentline>=0 then io.write(t.short_src,":",t.currentline," ") end
+ t=debug.getinfo(2)
+ if event=="call" then
+ level=level+1
+ else
+ level=level-1 if level<0 then level=0 end
+ end
+ if t.what=="main" then
+ if event=="call" then
+ io.write("begin ",t.short_src)
+ else
+ io.write("end ",t.short_src)
+ end
+ elseif t.what=="Lua" then
+-- table.foreach(t,print)
+ io.write(event," ",t.name or "(Lua)"," <",t.linedefined,":",t.short_src,">")
+ else
+ io.write(event," ",t.name or "(C)"," [",t.what,"] ")
+ end
+ io.write("\n")
+end
+
+debug.sethook(hook,"cr")
+level=0
diff --git a/deps/lua/test/trace-globals.lua b/deps/lua/test/trace-globals.lua
new file mode 100644
index 0000000000000000000000000000000000000000..295e670caa2bc0aa95ea822b2a68c4305f6d31f0
--- /dev/null
+++ b/deps/lua/test/trace-globals.lua
@@ -0,0 +1,38 @@
+-- trace assignments to global variables
+
+do
+ -- a tostring that quotes strings. note the use of the original tostring.
+ local _tostring=tostring
+ local tostring=function(a)
+ if type(a)=="string" then
+ return string.format("%q",a)
+ else
+ return _tostring(a)
+ end
+ end
+
+ local log=function (name,old,new)
+ local t=debug.getinfo(3,"Sl")
+ local line=t.currentline
+ io.write(t.short_src)
+ if line>=0 then io.write(":",line) end
+ io.write(": ",name," is now ",tostring(new)," (was ",tostring(old),")","\n")
+ end
+
+ local g={}
+ local set=function (t,name,value)
+ log(name,g[name],value)
+ g[name]=value
+ end
+ setmetatable(getfenv(),{__index=g,__newindex=set})
+end
+
+-- an example
+
+a=1
+b=2
+a=10
+b=20
+b=nil
+b=200
+print(a,b,c)
diff --git a/deps/lua/test/xd.lua b/deps/lua/test/xd.lua
new file mode 100644
index 0000000000000000000000000000000000000000..ebc3effc06bfde46331640f4ddafcf47fea682fe
--- /dev/null
+++ b/deps/lua/test/xd.lua
@@ -0,0 +1,14 @@
+-- hex dump
+-- usage: lua xd.lua < file
+
+local offset=0
+while true do
+ local s=io.read(16)
+ if s==nil then return end
+ io.write(string.format("%08X ",offset))
+ string.gsub(s,"(.)",
+ function (c) io.write(string.format("%02X ",string.byte(c))) end)
+ io.write(string.rep(" ",3*(16-string.len(s))))
+ io.write(" ",string.gsub(s,"%c","."),"\n")
+ offset=offset+16
+end
diff --git a/deps/pthread/CMakeLists.txt b/deps/pthread/CMakeLists.txt
index 04e5be7472a9b8cbdb384348697b919bf2dd0ece..16d03f3590bf933c383dd1294b1117fd9f95ad7a 100644
--- a/deps/pthread/CMakeLists.txt
+++ b/deps/pthread/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)
diff --git a/deps/regex/CMakeLists.txt b/deps/regex/CMakeLists.txt
index 054b093d07c386d7ff9b0ffc4c05909d79b33129..05d01f02efa4c731bb67f6f5f654b499f6f2be03 100644
--- a/deps/regex/CMakeLists.txt
+++ b/deps/regex/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)
diff --git a/deps/wepoll/CMakeLists.txt b/deps/wepoll/CMakeLists.txt
index a81fd782bbc4b05a1158273a7fcc6701bc4d980d..e9b7749d82e381e7002f7bca65dc6d5a4e1a7740 100644
--- a/deps/wepoll/CMakeLists.txt
+++ b/deps/wepoll/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)
diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt
index f83aa70085491fb6575c0a6bf93252192cddd040..1220cc4246b4cef9b0709e2f14dec46ba787c4cc 100644
--- a/deps/zlib-1.2.11/CMakeLists.txt
+++ b/deps/zlib-1.2.11/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index c900cd373d7b48904f021e083afd82399770a0a7..18bdc15d30430516c3ae6c847fc448477003dd66 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -63,7 +63,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [高级功能](/advanced-features)
* [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算
-* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):象典型的消息队列,应用可订阅接收到的最新数据
+* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据
* [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取
* [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送
@@ -106,6 +106,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入
* [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出
* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等
+* [性能优化](/administrator#optimize):对长期运行的系统进行维护优化,保障性能表现
* [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录
* [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index 6eb58a1433ed0d43b313a9dc979ae5873ba00e8f..fa364816465a4dac445902c0577c3f5f0435a143 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -2,25 +2,25 @@
## 快捷安装
-TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。
+TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。
### 通过源码安装
-请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装.
+请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码并安装.
-### 通过Docker容器运行
+### 通过 Docker 容器运行
-暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。
+暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。
-详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。
+详细操作方法请参照 [通过 Docker 快速体验 TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。
### 通过安装包安装
-TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
+TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
-安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
+安装包下载在 [这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
-具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
+具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
## 轻松启动
@@ -53,21 +53,21 @@ $ systemctl status taosd
如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
-## TDengine命令行程序
+## TDengine 命令行程序
-执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。
+执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。
```bash
$ taos
```
-如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下:
+如果 TDengine 终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。TDengine 终端的提示符号如下:
```cmd
taos>
```
-在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:
+在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
```mysql
create database demo;
@@ -76,24 +76,24 @@ create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
- ts | speed |
-===================================
- 19-07-15 00:00:00.000| 10|
- 19-07-15 01:00:00.000| 20|
-Query OK, 2 row(s) in set (0.001700s)
+ ts | speed |
+========================================
+ 2019-07-15 00:00:00.000 | 10 |
+ 2019-07-15 01:00:00.000 | 20 |
+Query OK, 2 row(s) in set (0.003128s)
```
-除执行SQL语句外,系统管理员还可以从TDengine终端检查系统运行状态,添加删除用户账号等。
+除执行 SQL 语句外,系统管理员还可以从 TDengine 终端检查系统运行状态,添加删除用户账号等。
### 命令行参数
-您可通过配置命令行参数来改变TDengine终端的行为。以下为常用的几个命令行参数:
+您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数:
-- -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_
-- -h, --host: 指定服务的IP地址,默认为本地服务
-- -s, --commands: 在不进入终端的情况下运行TDengine命令
-- -u, -- user: 连接TDengine服务器的用户名,缺省为root
-- -p, --password: 连接TDengine服务器的密码,缺省为taosdata
+- -c, --config-dir: 指定配置文件目录,默认为 _/etc/taos_
+- -h, --host: 指定服务的 FQDN 地址(也可以使用 IP),默认为连接本地服务
+- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
+- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root
+- -p, --password: 连接TDengine服务器的密码,缺省为 taosdata
- -?, --help: 打印出所有命令行参数
示例:
@@ -102,7 +102,7 @@ Query OK, 2 row(s) in set (0.001700s)
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
-### 运行SQL命令脚本
+### 运行 SQL 命令脚本
TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
@@ -110,27 +110,27 @@ TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
taos> source <filename>;
```
-### Shell小技巧
+### Shell 小技巧
- 可以使用上下光标键查看历史输入的指令
-- 修改用户密码。在 shell 中使用 alter user 指令
+- 修改用户密码,在 shell 中使用 alter user 指令
- ctrl+c 中止正在进行中的查询
- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema
## TDengine 极速体验
-启动TDengine的服务,在Linux终端执行taosdemo
+启动 TDengine 的服务,在 Linux 终端执行 taosdemo
```bash
$ taosdemo
```
-该命令将在数据库test下面自动创建一张超级表meters,该超级表下有1万张表,表名为"t0" 到"t9999",每张表有10万条记录,每条记录有 (f1, f2, f3)三个字段,时间戳从"2017-07-14 10:40:00 000" 到"2017-07-14 10:41:39 999",每张表带有标签areaid和loc, areaid被设置为1到10, loc被设置为"beijing"或者“shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "t0" 到 "t9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
-执行这条命令大概需要10分钟,最后共插入10亿条记录。
+执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
-在TDengine客户端输入查询命令,体验查询速度。
+在 TDengine 客户端输入查询命令,体验查询速度。
- 查询超级表下记录总条数:
@@ -138,49 +138,43 @@ $ taosdemo
taos> select count(*) from test.meters;
```
-- 查询10亿条记录的平均值、最大值、最小值等:
+- 查询 1 亿条记录的平均值、最大值、最小值等:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.meters;
+taos> select avg(current), max(voltage), min(phase) from test.meters;
```
-- 查询loc="beijing"的记录总条数:
+- 查询 location="beijing" 的记录总条数:
```mysql
-taos> select count(*) from test.meters where loc="beijing";
+taos> select count(*) from test.meters where location="beijing";
```
-- 查询areaid=10的所有记录的平均值、最大值、最小值等:
+- 查询 groupdId=10 的所有记录的平均值、最大值、最小值等:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
```
-- 对表t10按10s进行平均值、最大值和最小值聚合统计:
+- 对表 t10 按 10s 进行平均值、最大值和最小值聚合统计:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
+taos> select avg(current), max(voltage), min(phase) from test.t10 interval(10s);
```
-**Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。
+**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
## 客户端和报警模块
-如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下:
+如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
-- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
-- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
-- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
-
-报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
-
-- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
+报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
## 支持平台列表
-### TDengine服务器支持的平台列表
+### TDengine 服务器支持的平台列表
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
@@ -201,9 +195,9 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
-### TDengine客户端和连接器支持的平台列表
+### TDengine 客户端和连接器支持的平台列表
-目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。
+目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。
对照矩阵如下:
@@ -220,5 +214,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
-请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。
+请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index fb47d79268fe0a4fa84b444187a5aa700a687027..511bab8a605ce666d263d609d1599e30c85d78c4 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -532,8 +532,9 @@ Query OK, 1 row(s) in set (0.000141s)
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
-| 2.0.22 | 2.0.18.0 及以上 | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x |
+| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 2d76c866d11c1e1f51927c5536184b15aa6afe14..3a6e884f56addc7d2d4ccacad57ef3baa6844a4b 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -259,7 +259,7 @@ typedef struct taosField {
获取最近一次API调用失败的原因,返回值为字符串。
-- `char *taos_errno(TAOS_RES *res)`
+- `int taos_errno(TAOS_RES *res)`
获取最近一次API调用失败的原因,返回值为错误代码。
@@ -427,12 +427,15 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* res:查询结果集,注意结果集中可能没有记录
* param:调用 `taos_subscribe`时客户程序提供的附加参数
* code:错误码
+
**注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。
+ **注意**:在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。
+
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
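
The note added in this hunk warns that, after `taos_consume()`, the subscriber must drain the result set promptly with `taos_fetch_row()`/`taos_fetch_block()`. Below is a minimal sketch of that synchronous polling loop. It assumes the TDengine 2.x C client header `taos.h`; the exact `taos_subscribe()` parameters, connection details, and topic/SQL strings are illustrative assumptions, so check `taos.h` for the current signatures.

```c
/* Sketch: synchronous subscription loop (assumed TDengine 2.x C client).
** taos_consume() blocks up to the polling interval; results are drained
** immediately so the server does not have to cache them. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;
  /* synchronous mode: no callback (fp = NULL), poll every 1000 ms */
  TAOS_SUB *sub = taos_subscribe(conn, 1, "topic_meters",
                                 "select * from meters;", NULL, NULL, 1000);
  for (int i = 0; i < 10; i++) {                 /* a few polling rounds */
    TAOS_RES *res = taos_consume(sub);           /* empty result set if no new rows */
    if (res == NULL) break;                      /* error */
    int nfields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) { /* drain promptly (see note above) */
      char line[1024];
      taos_print_row(line, row, fields, nfields);
      printf("%s\n", line);
    }
  }
  taos_unsubscribe(sub, 0);  /* 0: discard the saved subscription progress */
  taos_close(conn);
  return 0;
}
```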
@@ -554,6 +557,13 @@ c1.close()
conn.close()
```
+#### 关于纳秒 (nanosecond) 在 Python 连接器中的说明
+
+由于目前 Python 对 nanosecond 支持的不完善(参见链接 1. 2. ),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,涛思数据可能会修改相关接口。
+
+1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
+2. https://www.python.org/dev/peps/pep-0564/
+
#### 帮助信息
用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法:
@@ -897,6 +907,10 @@ go env -w GOPROXY=https://goproxy.io,direct
sql.Open内置的方法,Close closes the statement.
+### 其他代码示例
+
+[Consume Messages from Kafka](https://github.com/taosdata/go-demo-kafka) 是一个通过 Go 语言实现消费 Kafka 队列写入 TDengine 的示例程序,也可以作为通过 Go 连接 TDengine 的写法参考。
+
## Node.js Connector
Node.js连接器支持的系统有:
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 10951ed1fbd7185b3944276fc393e59f352e05ef..19e4b761bad466ef80f2eb2ab128e99a4903b2d7 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -123,8 +123,8 @@ taosd -C
- minRows:文件块中记录的最小条数。单位为条,默认值:100。
- maxRows:文件块中记录的最大条数。单位为条,默认值:4096。
- comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
-- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)(可通过 alter database 修改)
-- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。(可通过 alter database 修改)
+- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)
+- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。
- cache:内存块的大小。单位为兆字节(MB),默认值:16。
- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改)
- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改)
@@ -418,6 +418,19 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。
+
+## 性能优化
+
+因数据行 [update](https://www.taosdata.com/cn/documentation/faq#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程:
+
+```mysql
+COMPACT VNODES IN (vg_id1, vg_id2, ...)
+```
+
+COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 1 时表示对应的 VGroup 正在进行碎片重整,为 0 时则表示并没有处于重整状态。
+
+需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。
+
## 文件目录结构
安装TDengine后,默认会在操作系统中生成下列目录或文件:
@@ -465,43 +478,44 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
-| 关键字列表 | | | | |
-| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
-| ABORT | COPY | ID | NCHAR | SPREAD |
-| ACCOUNT | COUNT | IF | NE | STABLE |
-| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
-| ADD | CTIME | IMMEDIATE | NOT | STAR |
-| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
-| ALL | DATABASES | IN | NOW | STDDEV |
-| ALTER | DAYS | INITIALLY | OF | STREAM |
-| AND | DEFERRED | INSERT | OFFSET | STREAMS |
-| AS | DELIMITERS | INSTEAD | OR | STRING |
-| ASC | DESC | INTEGER | ORDER | SUM |
-| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
-| AVG | DETACH | INTO | PERCENTILE | TABLES |
-| BEFORE | DIFF | IP | PLUS | TAG |
-| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
-| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
-| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
-| BINARY | DNODES | KEEP | QUERIES | TIMES |
-| BITAND | DOT | KEY | QUERY | TIMESTAMP |
-| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
-| BITOR | DROP | LAST | REM | TOP |
-| BOOL | EACH | LE | REPLACE | TOPIC |
-| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
-| BY | EQ | LIKE | RESET | UMINUS |
-| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
-| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
-| CHANGE | FAIL | LOCAL | ROWS | USE |
-| CLOG | FILL | LP | RP | USER |
-| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS |
-| COLON | FLOAT | LT | SCORES | USING |
-| COLUMN | FOR | MATCH | SELECT | VALUES |
-| COMMA | FROM | MAX | SEMI | VARIABLE |
-| COMP | GE | METRIC | SET | VGROUPS |
-| CONCAT | GLOB | METRICS | SHOW | VIEW |
-| CONFIGS | GRANTS | MIN | SLASH | WAVG |
-| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
-| CONNECTION | GT | MNODES | SLIMIT | |
+| 关键字列表 | | | | |
+| ------------ | ------------ | ------------ | ------------ | ------------ |
+| ABORT | CREATE | IGNORE | NULL | STAR |
+| ACCOUNT | CTIME | IMMEDIATE | OF | STATE |
+| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT |
+| ADD | DATABASES | IN | OR | STATE_WINDOW |
+| AFTER | DAYS | INITIALLY | ORDER | STORAGE |
+| ALL | DBS | INSERT | PARTITIONS | STREAM |
+| ALTER | DEFERRED | INSTEAD | PASS | STREAMS |
+| AND | DELIMITERS | INT | PLUS | STRING |
+| AS | DESC | INTEGER | PPS | SYNCDB |
+| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE |
+| ATTACH | DETACH | INTO | PREV | TABLES |
+| BEFORE | DISTINCT | IS | PRIVILEGE | TAG |
+| BEGIN | DIVIDE | ISNULL | QTIME | TAGS |
+| BETWEEN | DNODE | JOIN | QUERIES | TBNAME |
+| BIGINT | DNODES | KEEP | QUERY | TIMES |
+| BINARY | DOT | KEY | QUORUM | TIMESTAMP |
+| BITAND | DOUBLE | KILL | RAISE | TINYINT |
+| BITNOT | DROP | LE | REM | TOPIC |
+| BITOR | EACH | LIKE | REPLACE | TOPICS |
+| BLOCKS | END | LIMIT | REPLICA | TRIGGER |
+| BOOL | EQ | LINEAR | RESET | TSERIES |
+| BY | EXISTS | LOCAL | RESTRICT | UMINUS |
+| CACHE | EXPLAIN | LP | ROW | UNION |
+| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED |
+| CASCADE | FILE | LT | RSHIFT | UPDATE |
+| CHANGE | FILL | MATCH | SCORES | UPLUS |
+| CLUSTER | FLOAT | MAXROWS | SELECT | USE |
+| COLON | FOR | MINROWS | SEMI | USER |
+| COLUMN | FROM | MINUS | SESSION | USERS |
+| COMMA | FSYNC | MNODES | SET | USING |
+| COMP | GE | MODIFY | SHOW | VALUES |
+| COMPACT | GLOB | MODULES | SLASH | VARIABLE |
+| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES |
+| CONFLICT | GROUP | NE | SLIMIT | VGROUPS |
+| CONNECTION | GT | NONE | SMALLINT | VIEW |
+| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
+| CONNS | ID | NOTNULL | STABLE | WAL |
+| COPY | IF | NOW | STABLES | WHERE |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 5904abbbaa5598357fe57dfcc2ce68b731524b75..73fa5b34e5620a7b56e1edd35c6705bf47c5d306 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -89,13 +89,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
```mysql
USE db_name;
```
- 使用/切换数据库
+ 使用/切换数据库(在 RESTful 连接方式下无效)。
- **删除数据库**
```mysql
DROP DATABASE [IF EXISTS] db_name;
```
- 删除数据库。所包含的全部数据表将被删除,谨慎使用
+ 删除数据库。指定 Database 所包含的全部数据表将被删除,谨慎使用!
- **修改数据库参数**
```mysql
@@ -129,16 +129,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。)
说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。
- ```mysql
- ALTER DATABASE db_name WAL 1;
- ```
- WAL 参数控制 WAL 日志的落盘方式。缺省值为 1,取值范围为 [1, 2]。1 表示写 WAL,但不执行 fsync;2 表示写 WAL,而且执行 fsync。
-
- ```mysql
- ALTER DATABASE db_name FSYNC 3000;
- ```
- FSYNC 参数控制执行 fsync 操作的周期。缺省值为 3000,单位是毫秒,取值范围为 [0, 180000]。如果设置为 0,表示每次写入,立即执行 fsync。该设置项主要用于调节 WAL 参数设为 2 时的系统行为。
-
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。
- **显示系统所有数据库**
@@ -218,7 +208,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。
- 通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’\_’下划线匹配一个字符。
+ 通配符匹配:1)'%'(百分号)匹配 0 到任意个字符;2)'\_'(下划线)匹配单个任意字符。如果希望匹配表名中原本就带有的下划线,可以用反斜线进行转义,即用 '\\\_' 来匹配表名中的下划线本身。
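+
+ 例如,下面的语句(仅为写法示意)将列出当前数据库下所有名称匹配 'd100_' 的数据表,即以 d100 开头且后面恰好还有一个字符的表名(如 d1001、d1002):
+
+ ```mysql
+ SHOW TABLES LIKE 'd100_';
+ ```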
- **显示一个数据表的创建语句**
@@ -279,11 +269,11 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
说明:
- 1) TAGS 列的数据类型不能是 timestamp 类型;
+ 1) TAGS 列的数据类型不能是 timestamp 类型;(从 2.1.3.0 版本开始,TAGS 列支持使用 timestamp 类型,但写入时需要为该标签列提供确定的时间戳值,暂不支持 `NOW + 10s` 这类四则运算表达式,写法示例见本说明列表之后)
2) TAGS 列名不能与其他列名相同;
- 3) TAGS 列名不能为预留关键字;
+ 3) TAGS 列名不能为预留关键字(参见:[参数限制与保留关键字](https://www.taosdata.com/cn/documentation/administrator#keywords) 章节);
4) TAGS 最多允许 128 个,至少 1 个,总长度不超过 16 KB。
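+
+ 例如,从 2.1.3.0 版本开始可以在 TAGS 中使用 timestamp 类型,写法大致如下(其中的表名、列名仅为示意):
+
+ ```mysql
+ CREATE TABLE sensors (ts TIMESTAMP, val FLOAT) TAGS (deploy_time TIMESTAMP);
+ INSERT INTO s1 USING sensors TAGS ('2021-07-13 00:00:00.000') VALUES (NOW, 1.23);
+ ```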
@@ -372,77 +362,82 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
## 数据写入
-- **插入一条记录**
- ```mysql
- INSERT INTO tb_name VALUES (field_value, ...);
- ```
- 向表tb_name中插入一条记录。
+### 写入语法:
+
+```mysql
+INSERT INTO
+ tb_name
+ [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
+ [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ [tb2_name
+ [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
+ [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ ...];
+```
-- **插入一条记录,数据对应到指定的列**
+### 详细描述及示例:
+
+- **插入一条或多条记录**
+ 指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...);
+ INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
```
- 向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。
-
-- **插入多条记录**
+ 或者,可以通过如下语句写入两行记录:
```mysql
- INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
```
- 向表tb_name中插入多条记录。
- **注意**:在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为now,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。
+ **注意:**
+ 1)在第二个例子中,两行记录的首列时间戳使用了不同格式的写法。其中字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整型格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响:例子中的时间戳在毫秒精度下可以写作 1626164208000,而如果是在微秒精度设置下就需要写为 1626164208000000。
+ 2)在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为 NOW,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的实际执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
+ 3)允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 keep 值(数据保留的天数);允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的 days 值(数据文件存储数据的时间跨度,单位为天)。keep 和 days 都是可以在创建数据库时指定的,缺省值分别是 3650 天和 10 天。
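+
+ 例如,下面的建库语句(仅为写法示意)在创建数据库时同时指定了 keep 和 days:
+
+ ```mysql
+ CREATE DATABASE power KEEP 3650 DAYS 10;
+ ```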
-- **按指定的列插入多条记录**
+- **插入记录,数据对应到指定的列**
+ 向数据子表中插入记录时,无论插入一行还是多行,都可以让数据对应到指定的列。对于 SQL 语句中没有出现的列,数据库将自动填充为 NULL。主键(时间戳)不能为 NULL。例如:
```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31);
```
- 向表tb_name中按指定的列插入多条记录。
+ **说明:**如果不指定列(也即使用全列模式),那么在 VALUES 部分提供的数据,必须为数据表的每一列都显式地提供取值。全列模式写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入 NULL。
-- **向多个表插入多条记录**
+- **向多个表插入记录**
+ 可以在一条语句中,分别向多个表插入一条或多条记录,并且也可以在插入过程中指定列。例如:
```mysql
- INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
- tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
- 同时向表tb1_name和tb2_name中分别插入多条记录。
-- **同时向多个表按列插入多条记录**
+- **插入记录时自动建表**
+ 如果用户在写入数据时并不确定某个表是否存在,可以使用自动建表语法来创建不存在的表;若该表已存在,则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如:
```mysql
- INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
- tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
```
- 同时向表tb1_name和tb2_name中按列分别插入多条记录。
-
- 注意:
- 1) 如果时间戳为now,系统将自动使用客户端当前时间作为该记录的时间戳;
- 2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
-
-- **插入记录时自动建表**
+ 也可以在自动建表时,只指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如:
```mysql
- INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
```
- 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。
-
-- **插入记录时自动建表,并指定具体的 TAGS 列**
+ 自动建表语法也支持在一条语句中向多个表插入记录。例如:
```mysql
- INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+ d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
- 在自动建表时,可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将取为空值。
+ **说明:**在 2.0.20.5 版本之前,在使用自动建表语法并指定列时,子表的列名必须紧跟在子表名称后面,而不能如例子里那样放在 TAGS 和 VALUES 之间。从 2.0.20.5 版本开始,两种写法都可以,但不能在一条 SQL 语句中混用,否则会报语法错误。
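+
+ 作为对照,下面给出“子表的列名紧跟在子表名称后面”的写法(仅为写法示意,注意两种写法不能在同一条语句中混用):
+
+ ```mysql
+ INSERT INTO d21004 (ts, current, phase) USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:36.000', 10.15, 0.32);
+ ```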
-- **同时向多个表按列插入多条记录,自动建表**
- ```mysql
- INSERT INTO tb1_name (tb1_field1_name, ...) [USING stb1_name TAGS (tag_value1, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...
- tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
+- **插入来自文件的数据记录**
+ 除了使用 VALUES 关键字插入一行或多行数据外,也可以把要写入的数据放在 CSV 文件中(英文逗号分隔、英文单引号括住每个值)供 SQL 指令读取。其中 CSV 文件无需表头。例如,如果 /tmp/csvfile.csv 文件的内容为:
+ ```
+ '2021-07-13 14:07:34.630', '10.2', '219', '0.32'
+ '2021-07-13 14:07:35.779', '10.15', '217', '0.33'
```
- 以自动建表的方式,同时向表tb1_name和tb2_name中按列分别插入多条记录。
- 说明:`(tb1_field1_name, ...)`的部分可以省略掉,这样就是使用全列模式写入——也即在 VALUES 部分提供的数据,必须为数据表的每个列都显式地提供数据。全列写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入NULL。
- 从 2.0.20.5 版本开始,子表的列名可以不跟在子表名称后面,而是可以放在 TAGS 和 VALUES 之间,例如像下面这样写:
+ 那么通过如下指令可以把这个文件中的数据写入子表中:
```mysql
- INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
```
- 注意:虽然两种写法都可以,但并不能在一条 SQL 语句中混用,否则会报语法错误。
**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。
-说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。
+**说明:**针对 insert 类型的 SQL 语句,我们采用的是流式解析策略:在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,INSERT 语句是无效的,但是 d1001 仍会被创建。
```mysql
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
@@ -696,29 +691,31 @@ Query OK, 1 row(s) in set (0.001091s)
* 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。
- WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。
- 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。
-- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
+- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条记录开始输出。LIMIT/OFFSET 对结果集的处理在 ORDER BY 之后执行。`LIMIT 5 OFFSET 2` 可以简写为 `LIMIT 2, 5`(写法示例见本列表之后)。
* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
-- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
+- 参数 SLIMIT 控制由 GROUP BY 指令划分出的分组中,至多允许输出多少个分组的数据。`SLIMIT 5 SOFFSET 2` 同样可以简写为 `SLIMIT 2, 5`。
- 通过 “>>” 输出结果可以导出到指定文件。
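+
+下面两条语句(仅为写法示意)等价,均表示跳过前 2 条记录之后再输出 5 条记录;SLIMIT/SOFFSET 的简写形式与此类似:
+
+```mysql
+SELECT * FROM d1001 LIMIT 5 OFFSET 2;
+SELECT * FROM d1001 LIMIT 2, 5;
+```
+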
### 支持的条件过滤操作
-| **Operation** | **Note** | **Applicable Data Types** |
-| --------------- | ----------------------------- | ------------------------------------- |
-| > | larger than | **`timestamp`** and all numeric types |
-| < | smaller than | **`timestamp`** and all numeric types |
-| >= | larger than or equal to | **`timestamp`** and all numeric types |
-| <= | smaller than or equal to | **`timestamp`** and all numeric types |
-| = | equal to | all types |
-| <> | not equal to | all types |
-| between and | within a certain range | **`timestamp`** and all numeric types |
-| % | match with any char sequences | **`binary`** **`nchar`** |
-| _ | match with a single char | **`binary`** **`nchar`** |
+| **Operation** | **Note** | **Applicable Data Types** |
+| --------------- | ----------------------------- | ----------------------------------------- |
+| > | larger than | **`timestamp`** and all numeric types |
+| < | smaller than | **`timestamp`** and all numeric types |
+| >= | larger than or equal to | **`timestamp`** and all numeric types |
+| <= | smaller than or equal to | **`timestamp`** and all numeric types |
+| = | equal to | all types |
+| <> | not equal to | all types |
+| between and | within a certain range | **`timestamp`** and all numeric types |
+| in | matches any value in a set | all types except first column `timestamp` |
+| % | match with any char sequences | **`binary`** **`nchar`** |
+| _ | match with a single char | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
-3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。
-4. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
+4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值只有在精度范围内与数据行的值完全相等,才能匹配成功。
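+
+例如,下面的查询(仅为写法示意,其中的标签取值为假设值)同时使用了 BETWEEN AND 与 IN 两种过滤条件:
+
+```mysql
+SELECT * FROM meters WHERE current BETWEEN 1.5 AND 3.25 AND location IN ('Beijing.Chaoyang', 'Beijing.Haidian');
+```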
@@ -57,7 +57,7 @@
com.google.guava
guava
- 29.0-jre
+ 30.0-jre
@@ -113,16 +113,16 @@
**/*Test.java
- **/TSDBJNIConnectorTest.java
- **/UnsignedNumberJniTest.java
- **/DatetimeBefore1970Test.java
**/AppMemoryLeakTest.java
**/AuthenticationTest.java
- **/TaosInfoMonitorTest.java
+ **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
+ **/DatetimeBefore1970Test.java
**/FailOverTest.java
**/InvalidResultSetPointerTest.java
**/RestfulConnectionTest.java
- **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
+ **/TSDBJNIConnectorTest.java
+ **/TaosInfoMonitorTest.java
+ **/UnsignedNumberJniTest.java
true
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java
index 686ef3626249d0c6c8e89ca848e054238fe65562..b3887d436bcacd9748d169d0a7666a7bfa85589a 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java
@@ -1,6 +1,6 @@
package com.taosdata.jdbc;
-import com.taosdata.jdbc.rs.enums.TimestampFormat;
+import com.taosdata.jdbc.enums.TimestampFormat;
import java.sql.*;
import java.util.Enumeration;
@@ -306,9 +306,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
- if (isClosed())
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
-
switch (resultSetHoldability) {
case ResultSet.HOLD_CURSORS_OVER_COMMIT:
break;
@@ -322,11 +319,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
}
@Override
- public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
- throws SQLException {
- if (isClosed())
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
-
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
switch (resultSetHoldability) {
case ResultSet.HOLD_CURSORS_OVER_COMMIT:
break;
@@ -425,7 +418,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
status = resultSet.getInt("server_status()");
resultSet.close();
}
- return status == 1 ? true : false;
+ return status == 1;
});
boolean status = false;
@@ -434,9 +427,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
status = future.get();
else
status = future.get(timeout, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- e.printStackTrace();
- } catch (ExecutionException e) {
+ } catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
} catch (TimeoutException e) {
future.cancel(true);
@@ -452,8 +443,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
if (isClosed)
throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
- if (clientInfoProps != null)
- clientInfoProps.setProperty(name, value);
+ clientInfoProps.setProperty(name, value);
}
@Override
@@ -461,8 +451,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
if (isClosed)
throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
- for (Enumeration enumer = properties.keys(); enumer.hasMoreElements(); ) {
- String name = (String) enumer.nextElement();
+ for (Enumeration enumeration = properties.keys(); enumeration.hasMoreElements(); ) {
+ String name = (String) enumeration.nextElement();
clientInfoProps.put(name, properties.getProperty(name));
}
}
@@ -528,14 +518,13 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
if (milliseconds < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
+ // do nothing
}
@Override
public int getNetworkTimeout() throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
-
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
+ return 0;
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
index 48811d30a6903c077a9c0fb55176b0bc2ca9ba75..3c9c784f594d6cb022267c2ff1cd848c26f53ac3 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
@@ -12,8 +12,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
private final static int DRIVER_MAJAR_VERSION = 2;
private final static int DRIVER_MINOR_VERSION = 0;
- private String precision = "ms";
- private String database;
+ private String precision = TSDBConstants.DEFAULT_PRECISION;
public boolean allProceduresAreCallable() throws SQLException {
return false;
@@ -1223,7 +1222,6 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ResultSet databases = stmt.executeQuery("show databases");
while (databases.next()) {
String dbname = databases.getString("name");
- this.database = dbname;
this.precision = databases.getString("precision");
if (dbname.equalsIgnoreCase(catalog))
return true;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java
index 5eaac1cd3ba7283b019a1d294c1a33334a3d9fa7..7d4a2683918d917c795fc4d23af3369560e3ef52 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java
@@ -58,7 +58,7 @@ public abstract class AbstractDriver implements Driver {
value = parameterValuePair.substring(indexOfEqual + 1);
}
}
- if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) {
+ if (value != null && value.length() > 0 && parameter.length() > 0) {
urlProps.setProperty(parameter, value);
}
}
@@ -87,7 +87,7 @@ public abstract class AbstractDriver implements Driver {
url = url.substring(0, indexOfColon);
}
// parse host
- if (url != null && url.length() > 0 && url.trim().length() > 0) {
+ if (url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
return urlProps;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
index cb6c7d40ad1f1e12324043d78cb423fcfd46dca8..7d9d8ee5154bf3e63c024d0ded1defd2d2bdb4b0 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
@@ -1,7 +1,5 @@
package com.taosdata.jdbc;
-import com.sun.org.apache.xpath.internal.operations.Bool;
-
import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
index cd266529f34c0693a9891af33c12705b90132800..bda3d522123d09ece81384c6eba814c7e548e1ec 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
@@ -15,10 +15,12 @@
package com.taosdata.jdbc;
import java.math.BigDecimal;
-import java.sql.*;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
-import java.util.Iterator;
import java.util.List;
/*
@@ -130,7 +132,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
public Timestamp getTimestamp(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getTimestamp(columnIndex,nativeType);
+ return rowCursor.getTimestamp(columnIndex, nativeType);
}
@Override
@@ -145,9 +147,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override
public int findColumn(String columnLabel) throws SQLException {
- Iterator colMetaDataIt = this.columnMetaDataList.iterator();
- while (colMetaDataIt.hasNext()) {
- ColumnMetaData colMetaData = colMetaDataIt.next();
+ for (ColumnMetaData colMetaData : this.columnMetaDataList) {
if (colMetaData.getColName() != null && colMetaData.getColName().equals(columnLabel)) {
return colMetaData.getColIndex();
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java
index b6587b942de3c46139fa0640c07098cbc2b025d4..64b4276e93497f9aab8117df83de90ba28e74b78 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java
@@ -23,7 +23,7 @@ import java.util.Calendar;
import java.util.Map;
/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
+ * TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
index 02fee74eb5544f282559f88dab723ccfd8ca096f..8cd8da6de4f7d5324afbc6d5a5d54d6b8dcc7a8d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
@@ -20,7 +20,7 @@ import java.util.Properties;
public class TSDBConnection extends AbstractConnection {
private TSDBJNIConnector connector;
- private TSDBDatabaseMetaData databaseMetaData;
+ private final TSDBDatabaseMetaData databaseMetaData;
private boolean batchFetch;
public Boolean getBatchFetch() {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
index e6406d2c6dc7990ec4f3149bb8c5146202f5d326..5b5128e7204f9e75c5901a0d2a5daee9b4c082b2 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
@@ -54,10 +54,11 @@ public abstract class TSDBConstants {
public static final int TSDB_DATA_TYPE_USMALLINT = 12; //unsigned smallint
public static final int TSDB_DATA_TYPE_UINT = 13; //unsigned int
public static final int TSDB_DATA_TYPE_UBIGINT = 14; //unsigned bigint
+
// nchar column max length
public static final int maxFieldSize = 16 * 1024;
- // precision for data types
+ // precision for data types, this is used for metadata
public static final int BOOLEAN_PRECISION = 1;
public static final int TINYINT_PRECISION = 4;
public static final int SMALLINT_PRECISION = 6;
@@ -67,10 +68,12 @@ public abstract class TSDBConstants {
public static final int DOUBLE_PRECISION = 22;
public static final int TIMESTAMP_MS_PRECISION = 23;
public static final int TIMESTAMP_US_PRECISION = 26;
- // scale for data types
+ // scale for data types, this is used for metadata
public static final int FLOAT_SCALE = 31;
public static final int DOUBLE_SCALE = 31;
+ public static final String DEFAULT_PRECISION = "ms";
+
public static int typeName2JdbcType(String type) {
switch (type.toUpperCase()) {
case "TIMESTAMP":
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
index 8b7ede148e89cce0d8db22e62627bd1e1c49f9bb..9a5eda4cd8bf59e75540a4ce0a1d7fb4255fa1f1 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
@@ -20,8 +20,8 @@ import java.sql.SQLException;
public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData {
- private String url;
- private String userName;
+ private final String url;
+ private final String userName;
private Connection conn;
public TSDBDatabaseMetaData(String url, String userName) {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index 55533bd28cc4027c2e4a258cf2a36fd5b72d12f2..98a7d1929b824445362a7653f05a2725a41cc926 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -176,7 +176,7 @@ public class TSDBDriver extends AbstractDriver {
int beginningOfSlashes = url.indexOf("//");
int index = url.indexOf("?");
if (index != -1) {
- String paramString = url.substring(index + 1, url.length());
+ String paramString = url.substring(index + 1);
url = url.substring(0, index);
StringTokenizer queryParams = new StringTokenizer(paramString, "&");
while (queryParams.hasMoreElements()) {
@@ -213,7 +213,7 @@ public class TSDBDriver extends AbstractDriver {
url = url.substring(0, indexOfColon);
}
- if (url != null && url.length() > 0 && url.trim().length() > 0) {
+ if (url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
@@ -233,7 +233,7 @@ public class TSDBDriver extends AbstractDriver {
return false;
}
- public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+ public Logger getParentLogger() {
return null;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
index 411a61eb86e317f5e38e37d2506ec1bdbe01c0a1..da89081428bb076c69be5e5aac189aa467d09307 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
@@ -8,7 +8,7 @@ import java.util.HashMap;
import java.util.Map;
public class TSDBError {
- private static Map TSDBErrorMap = new HashMap<>();
+ private static final Map TSDBErrorMap = new HashMap<>();
static {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED, "connection already closed");
@@ -34,9 +34,8 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
- /**************************************************/
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
- /**************************************************/
+
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database");
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
index 9f0ba5afabde71bf6359cbfb66757e150899deaf..714162b563d7358a1ed0b20476b0775d9cb343cb 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -1,6 +1,7 @@
package com.taosdata.jdbc;
import java.util.HashSet;
+import java.util.Set;
public class TSDBErrorNumbers {
@@ -39,10 +40,9 @@ public class TSDBErrorNumbers {
public static final int ERROR_JNI_FETCH_END = 0x2358; // fetch to the end of resultSet
public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359; // JNI alloc memory failed
- private static final HashSet errorNumbers;
+ private static final Set errorNumbers = new HashSet();
static {
- errorNumbers = new HashSet();
errorNumbers.add(ERROR_CONNECTION_CLOSED);
errorNumbers.add(ERROR_UNSUPPORTED_METHOD);
errorNumbers.add(ERROR_INVALID_VARIABLE);
@@ -65,7 +65,6 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
- /*****************************************************/
errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);
errorNumbers.add(ERROR_JNI_TDENGINE_ERROR);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index bc4d58785a1cfd15ba236c3e2f6e355f209ec916..7f400fc1eeae2efc3d0ab800083969404c50a469 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -29,14 +29,9 @@ import java.util.List;
public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
- private TaosInfo taosInfo = TaosInfo.getInstance();
-
- // Connection pointer used in C
- private long taos = TSDBConstants.JNI_NULL_POINTER;
-
- // result set status in current connection
- private boolean isResultsetClosed;
-
+ private final TaosInfo taosInfo = TaosInfo.getInstance();
+ private long taos = TSDBConstants.JNI_NULL_POINTER; // Connection pointer used in C
+ private boolean isResultsetClosed; // result set status in current connection
private int affectedRows = -1;
static {
@@ -96,11 +91,9 @@ public class TSDBJNIConnector {
/**
* Execute DML/DDL operation
- *
- * @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
- Long pSql = 0l;
+ long pSql = 0L;
try {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
taosInfo.stmt_count_increment();
@@ -161,7 +154,7 @@ public class TSDBJNIConnector {
private native long getResultSetImp(long connection, long pSql);
public boolean isUpdateQuery(long pSql) {
- return isUpdateQueryImp(this.taos, pSql) == 1 ? true : false;
+ return isUpdateQueryImp(this.taos, pSql) == 1;
}
private native long isUpdateQueryImp(long connection, long pSql);
@@ -195,7 +188,7 @@ public class TSDBJNIConnector {
*/
public int getSchemaMetaData(long resultSet, List columnMetaData) {
int ret = this.getSchemaMetaDataImp(this.taos, resultSet, columnMetaData);
- columnMetaData.stream().forEach(column -> column.setColIndex(column.getColIndex() + 1));
+ columnMetaData.forEach(column -> column.setColIndex(column.getColIndex() + 1));
return ret;
}
@@ -218,8 +211,9 @@ public class TSDBJNIConnector {
/**
* Get Result Time Precision.
+ *
* @return 0: ms, 1: us, 2: ns
- */
+ */
public int getResultTimePrecision(long sqlObj) {
return this.getResultTimePrecisionImp(this.taos, sqlObj);
}
@@ -228,8 +222,6 @@ public class TSDBJNIConnector {
/**
* Execute close operation from C to release connection pointer by JNI
- *
- * @throws SQLException
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);
@@ -268,8 +260,6 @@ public class TSDBJNIConnector {
/**
* Unsubscribe, close a subscription
- *
- * @param subscription
*/
void unsubscribe(long subscription, boolean isKeep) {
unsubscribeImp(subscription, isKeep);
@@ -282,13 +272,13 @@ public class TSDBJNIConnector {
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
- return res != 0 ? false : true;
+ return res == 0;
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
- Long stmt;
+ long stmt;
try {
stmt = prepareStmtImp(sql.getBytes(), this.taos);
} catch (Exception e) {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index 1a007156e9441dbea0806aa7c66e8dbfd84e4e7b..22fb0c4ae4987ade0a406fe5628bf80d975f3ae5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -57,13 +57,13 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
parameterCnt++;
}
}
- parameters = new Object[parameterCnt];
}
+ parameters = new Object[parameterCnt];
if (parameterCnt > 1) {
// the table name is also a parameter, so ignore it.
- this.colData = new ArrayList();
- this.tableTags = new ArrayList();
+ this.colData = new ArrayList<>();
+ this.tableTags = new ArrayList<>();
}
}
@@ -73,12 +73,16 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
- /*
+ /**
* Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
* order to process those supported SQLs.
*/
private void preprocessSql() {
- /***** For processing some of Spark SQLs*****/
+ // For processing some of Spark SQLs
// should replace it first
this.rawSql = this.rawSql.replaceAll("or (.*) is null", "");
this.rawSql = this.rawSql.replaceAll(" where ", " WHERE ");
@@ -125,7 +129,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
rawSql = rawSql.replace(matcher.group(1), tableFullName);
}
- /***** for inner queries *****/
}
@Override
@@ -519,12 +522,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
- ;
-
private static class TableTagInfo {
private boolean isNull;
- private Object value;
- private int type;
+ private final Object value;
+ private final int type;
public TableTagInfo(Object value, int type) {
this.value = value;
@@ -538,8 +539,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
- ;
-
public void setTableName(String name) {
this.tableName = name;
}
@@ -627,7 +626,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
this.colData.addAll(Collections.nCopies(this.parameters.length - 1 - this.tableTags.size(), null));
}
- ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
+ ColumnInfo col = this.colData.get(columnIndex);
if (col == null) {
ColumnInfo p = new ColumnInfo();
p.setType(type);
@@ -718,8 +717,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
ByteBuffer isNullList = ByteBuffer.allocate(num * Integer.BYTES);
isNullList.order(ByteOrder.LITTLE_ENDIAN);
- for (int i = 0; i < num; ++i) {
- TableTagInfo tag = this.tableTags.get(i);
+ for (TableTagInfo tag : this.tableTags) {
if (tag.isNull) {
typeList.put((byte) tag.type);
isNullList.putInt(1);
@@ -818,7 +816,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
typeList, lengthList, isNullList);
}
- ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
+ ColumnInfo colInfo = this.colData.get(0);
if (colInfo == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
}
@@ -954,7 +952,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types");
}
}
- ;
connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
index 59a64ad520f01c8c7f2b85f95057365e2410ecb6..00a62206fc7861a87177d14cc4b274c464dc4184 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
@@ -339,11 +339,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
- res = new BigDecimal(Long.valueOf(this.rowData.getObject(columnIndex).toString()));
+ res = new BigDecimal(Long.parseLong(this.rowData.getObject(columnIndex).toString()));
break;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
- res = new BigDecimal(Double.valueOf(this.rowData.getObject(columnIndex).toString()));
+ res = BigDecimal.valueOf(Double.parseDouble(this.rowData.getObject(columnIndex).toString()));
break;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return new BigDecimal(((Timestamp) this.rowData.getObject(columnIndex)).getTime());
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index 7b3be5d26397eae704d98f1e1802af14abaad4fc..6211f61dc505d2ccba5f11f3aacc980771b1a110 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -36,23 +36,20 @@ public class TSDBResultSetBlockData {
private int rowIndex = 0;
private List columnMetaDataList;
- private ArrayList colData = null;
+ private ArrayList colData;
public TSDBResultSetBlockData(List colMeta, int numOfCols) {
this.columnMetaDataList = colMeta;
- this.colData = new ArrayList(numOfCols);
+ this.colData = new ArrayList<>(numOfCols);
}
public TSDBResultSetBlockData() {
- this.colData = new ArrayList();
+ this.colData = new ArrayList<>();
}
public void clear() {
int size = this.colData.size();
- if (this.colData != null) {
- this.colData.clear();
- }
-
+ this.colData.clear();
setNumOfCols(size);
}
@@ -69,7 +66,7 @@ public class TSDBResultSetBlockData {
}
public void setNumOfCols(int numOfCols) {
- this.colData = new ArrayList(numOfCols);
+ this.colData = new ArrayList<>(numOfCols);
this.colData.addAll(Collections.nCopies(numOfCols, null));
}
@@ -166,15 +163,10 @@ public class TSDBResultSetBlockData {
}
}
-
/**
* The original type may not be a string type, but will be converted to by
* calling this method
- *
- * @param col column index
- * @return
- * @throws SQLException
*/
public String getString(int col) throws SQLException {
Object obj = get(col);
@@ -387,7 +379,7 @@ public class TSDBResultSetBlockData {
return null;
}
- return (long) val;
+ return val;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
index 48d42473926b638401cf5d9dd97466695ba452ab..6292673352529171cdc42ba73e0f47f8f05a21a4 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
@@ -41,10 +41,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD
}
public boolean isSearchable(int column) throws SQLException {
- if (column == 1) {
- return true;
- }
- return false;
+ return column == 1;
}
public boolean isCurrency(int column) throws SQLException {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index e00ec1e758f720c6dc59fd1a0d41f258ca66d4cc..b91fe88dfacd7c6e414aa842184b2c349b3e33b9 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -19,14 +19,13 @@ import com.taosdata.jdbc.utils.NullType;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.sql.Timestamp;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
public class TSDBResultSetRowData {
private ArrayList data;
- private int colSize;
+ private final int colSize;
public TSDBResultSetRowData(int colSize) {
this.colSize = colSize;
@@ -390,27 +389,27 @@ public class TSDBResultSetRowData {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: {
- Byte value = new Byte(String.valueOf(obj));
+ byte value = Byte.parseByte(String.valueOf(obj));
if (value >= 0)
- return value.toString();
+ return Byte.toString(value);
return Integer.toString(value & 0xff);
}
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: {
- Short value = new Short(String.valueOf(obj));
+ short value = Short.parseShort(String.valueOf(obj));
if (value >= 0)
- return value.toString();
+ return Short.toString(value);
return Integer.toString(value & 0xffff);
}
case TSDBConstants.TSDB_DATA_TYPE_UINT: {
- Integer value = new Integer(String.valueOf(obj));
+ int value = Integer.parseInt(String.valueOf(obj));
if (value >= 0)
- return value.toString();
- return Long.toString(value & 0xffffffffl);
+ return Integer.toString(value);
+ return Long.toString(value & 0xffffffffL);
}
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
- Long value = new Long(String.valueOf(obj));
+ long value = Long.parseLong(String.valueOf(obj));
if (value >= 0)
- return value.toString();
+ return Long.toString(value);
long lowValue = value & 0x7fffffffffffffffL;
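+ // sign bit is set: the unsigned value equals the low 63 bits plus 2^63, i.e. lowValue + Long.MAX_VALUE + 1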
return BigDecimal.valueOf(lowValue).add(BigDecimal.valueOf(Long.MAX_VALUE)).add(BigDecimal.valueOf(1)).toString();
}
@@ -432,25 +431,26 @@ public class TSDBResultSetRowData {
/**
* !!! this method is invoked by JNI method and the index start from 0 in C implementations
+ *
* @param precision 0 : ms, 1 : us, 2 : ns
*/
public void setTimestamp(int col, long ts, int precision) {
- long milliseconds = 0;
- int fracNanoseconds = 0;
+ long milliseconds;
+ int fracNanoseconds;
switch (precision) {
case 0: {
milliseconds = ts;
- fracNanoseconds = (int)(ts*1_000_000%1_000_000_000);
+ fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000);
break;
}
case 1: {
- milliseconds = ts/1_000;
- fracNanoseconds = (int)(ts*1_000%1_000_000_000);
+ milliseconds = ts / 1_000;
+ fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000);
break;
}
case 2: {
- milliseconds = ts/1_000_000;
- fracNanoseconds = (int)(ts%1_000_000_000);
+ milliseconds = ts / 1_000_000;
+ fracNanoseconds = (int) (ts % 1_000_000_000);
break;
}
default: {
@@ -467,12 +467,10 @@ public class TSDBResultSetRowData {
Object obj = data.get(col - 1);
if (obj == null)
return null;
- switch (nativeType) {
- case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
- return new Timestamp((Long) obj);
- default:
- return (Timestamp) obj;
+ if (nativeType == TSDBConstants.TSDB_DATA_TYPE_BIGINT) {
+ return new Timestamp((Long) obj);
}
+ return (Timestamp) obj;
}
public Object getObject(int col) {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java
index c5fd497ca3dfed8bc8555110660ff70a9fd23447..d74f7755b24f1eb59059fb5ad5ead48d420ec7f6 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java
@@ -47,9 +47,6 @@ public class TSDBSubscribe {
/**
* close subscription
- *
- * @param keepProgress
- * @throws SQLException
*/
public void close(boolean keepProgress) throws SQLException {
if (this.connecter.isClosed())
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/enums/TimestampFormat.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java
similarity index 65%
rename from src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/enums/TimestampFormat.java
rename to src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java
index edc429857e4b47031b60f6a49a62f21e183d80ed..5ff0774639fb20c575d46c41d977b4ce3a16de10 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/enums/TimestampFormat.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java
@@ -1,4 +1,4 @@
-package com.taosdata.jdbc.rs.enums;
+package com.taosdata.jdbc.enums;
public enum TimestampFormat {
STRING,
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
index d6a02b7e3a7d6ef2d7527cd101743cc4575b43ba..12a0ab57e2c35c7f1f550dd213db19a0effd4ebc 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
@@ -16,7 +16,7 @@ public class RestfulConnection extends AbstractConnection {
private final String host;
private final int port;
private final String url;
- private volatile String database;
+ private final String database;
private final String token;
/******************************************************/
private boolean isClosed;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
index 16272f4289ea9031afc91fa76260a8d5719f9a77..f2abbd24454ff13f708636b61bcf93be2e0d47b5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java
@@ -13,7 +13,7 @@ import java.util.Calendar;
public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement {
- private ParameterMetaData parameterMetaData;
+ private final ParameterMetaData parameterMetaData;
private final String rawSql;
private Object[] parameters;
private boolean isPrepared;
@@ -22,16 +22,16 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
super(conn, database);
this.rawSql = sql;
+ int parameterCnt = 0;
if (sql.contains("?")) {
- int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
if ('?' == sql.charAt(i)) {
parameterCnt++;
}
}
- parameters = new Object[parameterCnt];
this.isPrepared = true;
}
+ parameters = new Object[parameterCnt];
// build parameterMetaData
this.parameterMetaData = new RestfulParameterMetaData(parameters);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
index 13850f0b804fef1306789fe0dba0246a0bd8038a..d435062ef6d231e4c4f3545e75b8299e9876739a 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
@@ -7,7 +7,7 @@ import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.*;
import com.taosdata.jdbc.enums.TimestampPrecision;
-import com.taosdata.jdbc.rs.enums.TimestampFormat;
+import com.taosdata.jdbc.enums.TimestampFormat;
import com.taosdata.jdbc.utils.Utils;
import java.math.BigDecimal;
@@ -17,19 +17,20 @@ import java.time.ZoneOffset;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.List;
public class RestfulResultSet extends AbstractResultSet implements ResultSet {
- private volatile boolean isClosed;
- private int pos = -1;
- private final String database;
private final Statement statement;
// data
- private final ArrayList<ArrayList<Object>> resultSet = new ArrayList<>();
+ private final List<List<Object>> resultSet = new ArrayList<>();
// meta
- private ArrayList columnNames = new ArrayList<>();
- private ArrayList columns = new ArrayList<>();
- private RestfulResultSetMetaData metaData;
+ private final List columnNames = new ArrayList<>();
+ private final List columns = new ArrayList<>();
+ private final RestfulResultSetMetaData metaData;
+
+ private volatile boolean isClosed;
+ private int pos = -1;
/**
* 由一个result的Json构造结果集,对应执行show databases, show tables等这些语句,返回结果集,但无法获取结果集对应的meta,统一当成String处理
@@ -37,35 +38,30 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
* @param resultJson: 包含data信息的结果集,有sql返回的结果集
***/
public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException {
- this.database = database;
this.statement = statement;
+ // get head
+ JSONArray head = resultJson.getJSONArray("head");
// get column metadata
JSONArray columnMeta = resultJson.getJSONArray("column_meta");
// get row data
JSONArray data = resultJson.getJSONArray("data");
- if (data == null || data.isEmpty()) {
- columnNames.clear();
- columns.clear();
- this.resultSet.clear();
- this.metaData = new RestfulResultSetMetaData(this.database, null, this);
- return;
- }
- // get head
- JSONArray head = resultJson.getJSONArray("head");
// get rows
Integer rows = resultJson.getInteger("rows");
+
// parse column_meta
if (columnMeta != null) {
parseColumnMeta_new(columnMeta);
} else {
parseColumnMeta_old(head, data, rows);
}
- this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
+ this.metaData = new RestfulResultSetMetaData(database, columns, this);
+
+ if (data == null || data.isEmpty())
+ return;
// parse row data
- resultSet.clear();
for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
- ArrayList row = new ArrayList();
+ List row = new ArrayList();
JSONArray jsonRow = data.getJSONArray(rowIndex);
for (int colIndex = 0; colIndex < this.metaData.getColumnCount(); colIndex++) {
row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));
@@ -174,8 +170,8 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
//TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9
if (value < 1_0000_0000_0000_0L)
return new Timestamp(value);
- long epochSec = value / 1000_000l;
- long nanoAdjustment = value % 1000_000l * 1000l;
+ long epochSec = value / 1000_000L;
+ long nanoAdjustment = value % 1000_000L * 1000L;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
case UTC: {
@@ -244,10 +240,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
pos++;
- if (pos <= resultSet.size() - 1) {
- return true;
- }
- return false;
+ return pos <= resultSet.size() - 1;
}
@Override
@@ -257,13 +250,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
}
}
-// @Override
-// public boolean wasNull() throws SQLException {
-// if (isClosed())
-// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
-// return resultSet.isEmpty();
-// }
-
@Override
public String getString(int columnIndex) throws SQLException {
checkAvailability(columnIndex, resultSet.get(pos).size());
@@ -288,7 +274,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
wasNull = false;
if (value instanceof Boolean)
return (boolean) value;
- return Boolean.valueOf(value.toString());
+ return Boolean.parseBoolean(value.toString());
}
@Override
@@ -443,9 +429,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return new Date(((Timestamp) value).getTime());
- Date date = null;
- date = Utils.parseDate(value.toString());
- return date;
+ return Utils.parseDate(value.toString());
}
@Override
@@ -460,8 +444,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
Time time = null;
try {
time = Utils.parseTime(value.toString());
- } catch (DateTimeParseException e) {
- time = null;
+ } catch (DateTimeParseException ignored) {
}
return time;
}
@@ -525,9 +508,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Long || value instanceof Integer || value instanceof Short || value instanceof Byte)
- return new BigDecimal(Long.valueOf(value.toString()));
+ return new BigDecimal(Long.parseLong(value.toString()));
if (value instanceof Double || value instanceof Float)
- return new BigDecimal(Double.valueOf(value.toString()));
+ return BigDecimal.valueOf(Double.parseDouble(value.toString()));
if (value instanceof Timestamp)
return new BigDecimal(((Timestamp) value).getTime());
BigDecimal ret;
@@ -637,36 +620,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
-// if (this.resultSet.size() == 0)
-// return false;
-//
-// if (row == 0) {
-// beforeFirst();
-// return false;
-// } else if (row == 1) {
-// return first();
-// } else if (row == -1) {
-// return last();
-// } else if (row > this.resultSet.size()) {
-// afterLast();
-// return false;
-// } else {
-// if (row < 0) {
-// // adjust to reflect after end of result set
-// int newRowPosition = this.resultSet.size() + row + 1;
-// if (newRowPosition <= 0) {
-// beforeFirst();
-// return false;
-// } else {
-// return absolute(newRowPosition);
-// }
-// } else {
-// row--; // adjust for index difference
-// this.pos = row;
-// return true;
-// }
-// }
-
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -710,5 +663,4 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return isClosed;
}
-
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java
index a185abb709605cdbc9afecd26d33a1cac85ce37d..148f77974fcec9a997ade815f56b12564e537f58 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java
@@ -7,20 +7,17 @@ import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class RestfulResultSetMetaData extends WrapperImpl implements ResultSetMetaData {
private final String database;
- private List fields;
- private final RestfulResultSet resultSet;
+ private final List fields;
- public RestfulResultSetMetaData(String database, ArrayList fields, RestfulResultSet resultSet) {
+ public RestfulResultSetMetaData(String database, List fields, RestfulResultSet resultSet) {
this.database = database;
this.fields = fields == null ? Collections.emptyList() : fields;
- this.resultSet = resultSet;
}
public List getFields() {
@@ -141,8 +138,8 @@ public class RestfulResultSetMetaData extends WrapperImpl implements ResultSetMe
@Override
public String getColumnTypeName(int column) throws SQLException {
- int taos_type = fields.get(column - 1).taos_type;
- return TSDBConstants.taosType2JdbcTypeName(taos_type);
+ int taosType = fields.get(column - 1).taos_type;
+ return TSDBConstants.taosType2JdbcTypeName(taosType);
}
@Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index e9d193f6b412f6ab835d39f97a229f137e48cacf..f8acd8f06180476a09519c0809dd493d062c911c 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -35,10 +35,6 @@ public class RestfulStatement extends AbstractStatement {
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
- if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
- return executeOneQuery(sql);
- }
-
return executeOneQuery(sql);
}
@@ -50,9 +46,6 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
- if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
- return executeOneUpdate(url, sql);
- }
return executeOneUpdate(url, sql);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
index ff27bdbdad2d5f56a4652ae822b9ba10c80c51b0..715b051b74840cd01968957ab35493db2f53c5f6 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
@@ -27,19 +27,18 @@ public class HttpClientPoolUtil {
private static final int DEFAULT_MAX_TOTAL = 1000;
private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
- private static PoolingHttpClientConnectionManager connectionManager;
private static CloseableHttpClient httpClient;
private static synchronized void initPools() {
if (httpClient == null) {
- connectionManager = new PoolingHttpClientConnectionManager();
+ PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).build();
}
}
- private static ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
+ private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
while (it.hasNext()) {
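The keep-alive strategy shown above has HttpClient iterate the server's Keep-Alive response header and fall back to a fixed duration otherwise. Stripped of the HttpClient types, the parsing idea is roughly the following sketch; the header value "timeout=15, max=100" and the helper names are assumptions for illustration, not part of the driver.

public class KeepAliveParseSketch {
    // Returns the keep-alive duration in milliseconds, or the given default
    // when the header carries no usable "timeout" parameter.
    static long keepAliveMillis(String keepAliveHeader, long defaultMillis) {
        if (keepAliveHeader == null)
            return defaultMillis;
        for (String part : keepAliveHeader.split(",")) {
            String[] kv = part.trim().split("=", 2);
            if (kv.length == 2 && kv[0].trim().equalsIgnoreCase("timeout")) {
                try {
                    return Long.parseLong(kv[1].trim()) * 1000L; // seconds -> milliseconds
                } catch (NumberFormatException ignored) {
                    // malformed value: fall through to the default
                }
            }
        }
        return defaultMillis;
    }

    public static void main(String[] args) {
        System.out.println(keepAliveMillis("timeout=15, max=100", 15000)); // 15000
        System.out.println(keepAliveMillis(null, 15000));                  // 15000
    }
}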
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
index 251ca2af013e2b1c9cb314b776621455c91d9384..15faa41a27c8aef9359ef938975648b5c10f4a9f 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
@@ -14,10 +14,6 @@
*****************************************************************************/
package com.taosdata.jdbc.utils;
-import com.taosdata.jdbc.TSDBConnection;
-
-import java.sql.Connection;
-
public class SqlSyntaxValidator {
private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe"};
@@ -26,12 +22,6 @@ public class SqlSyntaxValidator {
private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};
- private TSDBConnection tsdbConnection;
-
- public SqlSyntaxValidator(Connection connection) {
- this.tsdbConnection = (TSDBConnection) connection;
- }
-
public static boolean isValidForExecuteUpdate(String sql) {
for (String prefix : updateSQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
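The validator retained above classifies a statement by its leading keyword: trim, lower-case, then test against a fixed prefix list. Seen on its own, the pattern is just the following; the prefix array here is a trimmed-down illustration, not the driver's full list.

import java.util.Arrays;

public class PrefixValidatorSketch {
    private static final String[] UPDATE_PREFIXES = {"insert", "import", "create", "use", "alter", "drop"};

    // Returns true when the statement starts with one of the update-style keywords.
    static boolean isValidForExecuteUpdate(String sql) {
        String normalized = sql.trim().toLowerCase();
        return Arrays.stream(UPDATE_PREFIXES).anyMatch(normalized::startsWith);
    }

    public static void main(String[] args) {
        System.out.println(isValidForExecuteUpdate("  INSERT INTO t VALUES (now, 1)")); // true
        System.out.println(isValidForExecuteUpdate("select * from t"));                 // false
    }
}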
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
index 4c92d27a28625e407d94e34da60aa7a57760baac..efe3303bd950e49f40e55b61bbca2cddf807b14f 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
@@ -6,6 +6,7 @@ import com.google.common.collect.TreeRangeSet;
import com.taosdata.jdbc.enums.TimestampPrecision;
import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
@@ -109,7 +110,7 @@ public class Utils {
return rawSql;
// toLowerCase
String preparedSql = rawSql.trim().toLowerCase();
- String[] clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)", "where\\s*.*"};
+ String[] clause = new String[]{"values\\s*\\([\\s\\S]*?\\)", "tags\\s*\\([\\s\\S]*?\\)", "where[\\s\\S]*"};
Map placeholderPositions = new HashMap<>();
RangeSet clauseRangeSet = TreeRangeSet.create();
findPlaceholderPosition(preparedSql, placeholderPositions);
@@ -160,7 +161,7 @@ public class Utils {
String paraStr;
if (para != null) {
if (para instanceof byte[]) {
- paraStr = new String((byte[]) para, Charset.forName("UTF-8"));
+ paraStr = new String((byte[]) para, StandardCharsets.UTF_8);
} else {
paraStr = para.toString();
}
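The clause-pattern change in Utils above rests on a Java regex rule: the dot metacharacter does not match line terminators unless DOTALL is enabled, so a pattern like values\s*\(.*?\) cannot span an INSERT whose VALUES list is wrapped onto several lines, whereas [\s\S] matches any character including newlines. A small standalone sketch (the SQL string is made up for illustration):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DotVsCharClassSketch {
    public static void main(String[] args) {
        String sql = "insert into t values(\n ?, ?\n)";
        // '.' does not match '\n' by default, so this pattern cannot cross the line break.
        Matcher dot = Pattern.compile("values\\s*\\(.*?\\)").matcher(sql);
        System.out.println(dot.find());   // false
        // '[\s\S]' matches any character, including '\n', without needing DOTALL.
        Matcher any = Pattern.compile("values\\s*\\([\\s\\S]*?\\)").matcher(sql);
        System.out.println(any.find());   // true
    }
}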
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
deleted file mode 100644
index 73ceafa7299b256d7e83064b53bd638835a4b075..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-package com.taosdata.jdbc;
-
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.sql.*;
-import java.util.Properties;
-
-public class StatementTest {
- static Connection connection = null;
- static Statement statement = null;
- static String dbName = "test";
- static String tName = "t0";
- static String host = "localhost";
-
- @BeforeClass
- public static void createConnection() throws SQLException {
- try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- Properties properties = new Properties();
- properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", properties);
- statement = connection.createStatement();
- statement.executeUpdate("drop database if exists " + dbName);
-
- } catch (ClassNotFoundException e) {
- return;
- }
- }
-
- @Test
- public void testCase() {
- try {
- ResultSet rs = statement.executeQuery("show databases");
- ResultSetMetaData metaData = rs.getMetaData();
- while (rs.next()) {
- for (int i = 1; i <= metaData.getColumnCount(); i++) {
- System.out.print(metaData.getColumnLabel(i) + ":" + rs.getString(i) + "\t");
- }
- System.out.println();
- }
- } catch (SQLException e) {
- e.printStackTrace();
- }
- }
-
- @Test
- public void createTableAndQuery() throws SQLException {
- long ts = System.currentTimeMillis();
-
- statement.executeUpdate("create database if not exists " + dbName);
- statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)");
- statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)");
- statement.execute("select * from " + dbName + "." + tName);
- ResultSet resultSet = statement.getResultSet();
- Assert.assertNotNull(resultSet);
-
- boolean isClosed = statement.isClosed();
- Assert.assertEquals(false, isClosed);
- }
-
- @Test(expected = SQLException.class)
- public void testUnsupport() throws SQLException {
- Assert.assertNotNull(statement.unwrap(TSDBStatement.class));
- Assert.assertTrue(statement.isWrapperFor(TSDBStatement.class));
-
- statement.getMaxFieldSize();
- statement.setMaxFieldSize(0);
- statement.setEscapeProcessing(true);
- statement.cancel();
- statement.getWarnings();
- statement.clearWarnings();
- statement.setCursorName(null);
- statement.getMoreResults();
- statement.setFetchDirection(0);
- statement.getFetchDirection();
- statement.getResultSetConcurrency();
- statement.getResultSetType();
- statement.getConnection();
- statement.getMoreResults();
- statement.getGeneratedKeys();
- statement.executeUpdate(null, 0);
- statement.executeUpdate(null, new int[]{0});
- statement.executeUpdate(null, new String[]{"str1", "str2"});
- statement.getResultSetHoldability();
- statement.setPoolable(true);
- statement.isPoolable();
- statement.closeOnCompletion();
- statement.isCloseOnCompletion();
- }
-
- @AfterClass
- public static void close() {
- try {
- statement.execute("drop database if exists " + dbName);
- if (statement != null)
- statement.close();
- if (connection != null)
- connection.close();
- } catch (SQLException e) {
- e.printStackTrace();
- }
- }
-}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java
index dc6d0d322af5c90dc0eb9d42328c9864714838a1..30c6d99a4093f4781c20dcd0a960b4456ef50dbe 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java
@@ -380,14 +380,15 @@ public class TSDBConnectionTest {
conn.abort(null);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void setNetworkTimeout() throws SQLException {
conn.setNetworkTimeout(null, 1000);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void getNetworkTimeout() throws SQLException {
- conn.getNetworkTimeout();
+ int networkTimeout = conn.getNetworkTimeout();
+ Assert.assertEquals(0, networkTimeout);
}
@Test
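The network-timeout hunks above replace the expected SQLFeatureNotSupportedException with concrete behaviour. Under the JDBC contract, Connection.getNetworkTimeout() reports the timeout in milliseconds and 0 means no limit, which is exactly what the new assertion checks. A minimal usage sketch, where conn stands for any open java.sql.Connection:

import java.sql.Connection;
import java.sql.SQLException;
import java.util.concurrent.Executors;

class NetworkTimeoutSketch {
    static void show(Connection conn) throws SQLException {
        // 0 means the driver imposes no network timeout on socket operations.
        int millis = conn.getNetworkTimeout();
        System.out.println("network timeout (ms): " + millis);
        // Setting a timeout requires an Executor the driver may use to abort
        // the connection once the limit is exceeded.
        conn.setNetworkTimeout(Executors.newSingleThreadExecutor(), 1000);
    }
}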
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java
index 51535bc886aa0c3114a5a5bb74190300977a9ec9..c1dfa42511cdd45ade577415fe17c872e44f5fd8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java
@@ -14,24 +14,6 @@ public class TSDBStatementTest {
private static Connection conn;
private static Statement stmt;
- @Test
- public void executeQuery() {
- try {
- ResultSet rs = stmt.executeQuery("show databases");
- Assert.assertNotNull(rs);
- ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
- }
- rs.close();
- } catch (SQLException e) {
- e.printStackTrace();
- }
- }
-
@Test
public void executeUpdate() {
final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
@@ -173,10 +155,6 @@ public class TSDBStatementTest {
Assert.assertEquals(3, meta.getColumnCount());
int count = 0;
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
count++;
}
Assert.assertEquals(1, count);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java
index 19bc5f713f9b406a943fc640fd03bb0503ed2967..4f37183e719e8eb21dcbd8dd625bd8d4d19214ce 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java
@@ -1,5 +1,6 @@
package com.taosdata.jdbc.cases;
+import org.junit.Assert;
import org.junit.Test;
import java.sql.Connection;
@@ -12,21 +13,19 @@ public class AppMemoryLeakTest {
@Test(expected = SQLException.class)
public void testCreateTooManyConnection() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
- int conCnt = 0;
while (true) {
Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
- System.out.println(conCnt++ + " : " + conn);
+ Assert.assertNotNull(conn);
}
}
@Test(expected = Exception.class)
public void testCreateTooManyStatement() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
- int stmtCnt = 0;
Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
while (true) {
Statement stmt = conn.createStatement();
- System.out.println(++stmtCnt + " : " + stmt);
+ Assert.assertNotNull(stmt);
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
similarity index 98%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
index a6fb6cfda044b4e88c5bd5509c51d114507d84f7..6702de9bdbf566eb1ecaea322d0338a64ffcd40c 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
@@ -1,4 +1,4 @@
-package com.taosdata.jdbc.rs;
+package com.taosdata.jdbc.cases;
import org.junit.Before;
import org.junit.Test;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
index e2541e8109681569d75a7384255df09d6166c34e..e175d6d1141e125d58f2a1e4a4f64c3d1b22bfbb 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
@@ -60,7 +60,6 @@ public class BatchInsertTest {
final int index = i;
executorService.execute(() -> {
try {
- long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
@@ -73,8 +72,6 @@ public class BatchInsertTest {
}
statement.addBatch(sb.toString());
statement.executeBatch();
- long endTime = System.currentTimeMillis();
- System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
connection.commit();
statement.close();
} catch (Exception e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
index 7f2d367dce4e5691603e23db8a14a4f857bb7b88..31893527aff2b3fd5f5512c729d60e0b8a137aa0 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
@@ -31,9 +31,6 @@ public class ConnectMultiTaosdByRestfulWithDifferentTokenTest {
ResultSet rs = stmt.executeQuery("select server_status()");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.println(meta.getColumnLabel(i) + ": " + rs.getString(i));
- }
}
} catch (SQLException e) {
e.printStackTrace();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java
index f97e555ad1b1acc7b6dd0024d893fcc1ccd4cc53..14c76985484857a92e174955c943caa21bdd2e72 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java
@@ -1,64 +1,76 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.utils.TimestampUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.*;
import java.sql.*;
public class DatetimeBefore1970Test {
- private static Connection conn;
+ private static final String host = "127.0.0.1";
+ private Connection conn;
@Test
public void test() {
try (Statement stmt = conn.createStatement()) {
+ // given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 08:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 07:59:59.999')");
+ ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
+ ResultSetMetaData metaData = rs.getMetaData();
+ Assert.assertEquals(2, metaData.getColumnCount());
+
+ // when
+ rs.next();
+ // then
+ Timestamp ts = rs.getTimestamp("ts");
+ Assert.assertEquals("1969-12-31 23:59:59.999", TimestampUtil.longToDatetime(ts.getTime()));
+
+ // when
+ rs.next();
+ // then
+ ts = rs.getTimestamp("ts");
+ Assert.assertEquals("1970-01-01 00:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
+
+ // when
+ rs.next();
+ // then
+ ts = rs.getTimestamp("ts");
+ Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
+
+ // when
+ rs.next();
+ // then
+ ts = rs.getTimestamp("ts");
+ Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime()));
- ResultSet rs = stmt.executeQuery("select * from weather");
- while (rs.next()) {
- Timestamp ts = rs.getTimestamp("ts");
- System.out.println("long: " + ts.getTime() + ", string: " + TimestampUtil.longToDatetime(ts.getTime()));
- }
} catch (SQLException e) {
e.printStackTrace();
}
}
- public static void main(String[] args) {
- System.out.println("timestamp: " + Long.MAX_VALUE + ", string: " + TimestampUtil.longToDatetime(Long.MAX_VALUE));
- System.out.println("timestamp: " + Long.MIN_VALUE + ", string: " + TimestampUtil.longToDatetime(Long.MIN_VALUE));
- System.out.println("timestamp: " + 0 + ", string: " + TimestampUtil.longToDatetime(0));
- System.out.println("timestamp: " + -1 + ", string: " + TimestampUtil.longToDatetime(-1));
- String datetime = "1970-01-01 00:00:00.000";
- System.out.println("timestamp: " + TimestampUtil.datetimeToLong(datetime) + ", string: " + datetime);
- datetime = "1969-12-31 23:59:59.999";
- System.out.println("timestamp: " + TimestampUtil.datetimeToLong(datetime) + ", string: " + datetime);
- }
-
- @BeforeClass
- public static void beforeClass() {
+ @Before
+ public void before() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata");
+ conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test_timestamp");
stmt.execute("create database if not exists test_timestamp keep 36500");
stmt.execute("use test_timestamp");
stmt.execute("create table weather(ts timestamp,f1 float)");
stmt.close();
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
e.printStackTrace();
}
}
- @AfterClass
- public static void afterClass() {
+ @After
+ public void after() {
try {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test_timestamp");
if (conn != null)
conn.close();
} catch (SQLException e) {
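The rewritten test above walks rows whose timestamps sit just below and just above the Unix epoch. The property it relies on is plain java.sql: instants before 1970-01-01T00:00:00Z are carried as negative epoch milliseconds, so they order and round-trip like any other Timestamp. A driver-independent sketch:

import java.sql.Timestamp;

public class Pre1970TimestampSketch {
    public static void main(String[] args) {
        // One millisecond before the epoch is simply a negative getTime() value.
        Timestamp beforeEpoch = new Timestamp(-1L);
        Timestamp epoch = new Timestamp(0L);
        System.out.println(beforeEpoch.getTime());     // -1
        System.out.println(epoch.getTime());           // 0
        System.out.println(beforeEpoch.before(epoch)); // true
    }
}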
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
index d7603312a090bedb17ed125edf6da535924964d0..bc11c7f34eeb719574a35beaf186cf637df2826f 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
@@ -20,7 +20,6 @@ public class ImportTest {
@BeforeClass
public static void before() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@@ -33,8 +32,6 @@ public class ImportTest {
stmt.close();
ts = System.currentTimeMillis();
- } catch (ClassNotFoundException e) {
- e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
@@ -47,7 +44,6 @@ public class ImportTest {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
- System.out.println("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
}
@@ -84,7 +80,6 @@ public class ImportTest {
long t = (++ts) + a;
sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") ");
}
- System.out.println(sqlBuilder.toString());
int rows = stmt.executeUpdate(sqlBuilder.toString());
assertEquals(50, rows);
} catch (SQLException e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java
index 54e4273ea3e54c8db18ca11c10f960468ab91c75..eb8f134227713e4c41224dc6a561916427290864 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java
@@ -41,7 +41,6 @@ public class MicroSecondPrecisionJNITest {
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
- System.out.println(timestamp);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java
similarity index 84%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java
index 5cb76cc0cb71e1cb879b696d05cc6682f93d0bdc..da6853d2fa06fb8436e79b95e9b7f1c2fa0785e2 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java
@@ -7,7 +7,7 @@ import org.junit.Test;
import java.sql.*;
import java.util.concurrent.TimeUnit;
-public class MultiThreadsWithSameStatmentTest {
+public class MultiThreadsWithSameStatementTest {
private class Service {
@@ -16,12 +16,11 @@ public class MultiThreadsWithSameStatmentTest {
public Service() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
stmt = conn.createStatement();
stmt.execute("create database if not exists jdbctest");
stmt.executeUpdate("create table if not exists jdbctest.weather (ts timestamp, f1 int)");
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
e.printStackTrace();
}
}
@@ -48,10 +47,6 @@ public class MultiThreadsWithSameStatmentTest {
ResultSet resultSet = service.stmt.executeQuery("select * from jdbctest.weather");
while (resultSet.next()) {
ResultSetMetaData metaData = resultSet.getMetaData();
- for (int i = 1; i <= metaData.getColumnCount(); i++) {
- System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i));
- }
- System.out.println();
}
resultSet.close();
service.release();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java
index 076125a752e5c4e81e24cbfa3299fa349939094b..ae0241bf31eea85083bf102c4123f7e30c2bd693 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java
@@ -17,11 +17,6 @@ public class NullValueInResultSetJNITest {
ResultSet rs = stmt.executeQuery("select * from weather");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- Object value = rs.getObject(i);
- System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
- }
- System.out.println();
}
} catch (SQLException e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java
index ea6e36ec1d005e48f36ee1283935664aca6fcc47..7fbb30a5244a53129807cd76472674ff1cfd6ae4 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java
@@ -19,9 +19,7 @@ public class NullValueInResultSetRestfulTest {
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
Object value = rs.getObject(i);
- System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
}
- System.out.println();
}
} catch (SQLException e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
index d0ba113b7a4a8f99e22eb8143905d0b086583e1d..535e56f7d7735a7cbd209fbb2a2fddd492021e15 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
@@ -21,7 +21,6 @@ public class QueryDataTest {
@Before
public void createDatabase() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@@ -36,7 +35,7 @@ public class QueryDataTest {
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
return;
}
}
@@ -44,7 +43,6 @@ public class QueryDataTest {
@Test
public void testQueryBinaryData() throws SQLException {
String insertSql = "insert into " + stbName + " values(now, 'taosdata')";
- System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
@@ -52,7 +50,6 @@ public class QueryDataTest {
while (rs.next()) {
String name = rs.getString(2);
- System.out.println("name = " + name);
assertEquals("taosdata", name);
}
rs.close();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java
index f6d6bb25566a8c6060311a1cf0c918a0456b8c24..be27b1350781245e3056185db4bbaa8b5105d2f0 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java
@@ -31,7 +31,7 @@ public class ResultSetMetaShouldNotBeNullRestfulTest {
// then
Assert.assertNotNull(metaData);
- Assert.assertEquals(0, columnCount);
+ Assert.assertEquals(2, columnCount);
}
@Test
@@ -53,7 +53,7 @@ public class ResultSetMetaShouldNotBeNullRestfulTest {
// then
Assert.assertEquals(true, execute);
Assert.assertNotNull(metaData);
- Assert.assertEquals(0, columnCount);
+ Assert.assertEquals(2, columnCount);
}
@Before
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
index 38c8cbb98c48342f131f4f5f0fee885bb446e83c..0022ceaf2123ac03192f761ef068ecf5ad333e6d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
@@ -19,7 +19,6 @@ public class SelectTest {
@Before
public void createDatabaseAndTable() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@@ -31,8 +30,6 @@ public class SelectTest {
stmt.execute("create database if not exists " + dbName);
stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
stmt.close();
- } catch (ClassNotFoundException e) {
- return;
} catch (SQLException e) {
e.printStackTrace();
}
@@ -47,7 +44,6 @@ public class SelectTest {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
- System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
index 4575cb73a05fbbc19d6eaf2ba5be0ed27b61804c..332c171c380e061b747c74328ebeef37225e9b2d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
@@ -23,7 +23,6 @@ public class StableTest {
@BeforeClass
public static void createDatabase() {
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@@ -34,8 +33,6 @@ public class StableTest {
statement.execute("create database if not exists " + dbName);
statement.execute("use " + dbName);
statement.close();
- } catch (ClassNotFoundException e) {
- return;
} catch (SQLException e) {
e.printStackTrace();
}
@@ -68,9 +65,6 @@ public class StableTest {
String sql = "describe " + stbName;
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
- for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
- System.out.println(i + ":" + rs.getString(i));
- }
num++;
}
rs.close();
@@ -86,9 +80,6 @@ public class StableTest {
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("describe t1");
while (rs.next()) {
- for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
- System.out.printf("%d: %s\n", i, rs.getString(i));
- }
num++;
}
rs.close();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java
index e239aa068bae0ee70d204aec4412bf29e5b36bf1..c2f732c86903dc80328f975260a4b2352e55efd9 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java
@@ -8,11 +8,6 @@ import java.sql.*;
public class RestfulDriverTest {
private static final String host = "127.0.0.1";
- @Test
- public void connect() {
-
- }
-
@Test
public void acceptsURL() throws SQLException {
Driver driver = new RestfulDriver();
@@ -27,9 +22,7 @@ public class RestfulDriverTest {
Driver driver = new RestfulDriver();
final String url = "";
DriverPropertyInfo[] propertyInfo = driver.getPropertyInfo(url, null);
- for (DriverPropertyInfo prop : propertyInfo) {
- System.out.println(prop);
- }
+ Assert.assertNotNull(propertyInfo);
}
@Test
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java
index c8bb69d82749e606f18d3298697ea0995029d064..b07dae8003d6e2fea073c0d240f59fb6db0c593f 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java
@@ -10,132 +10,151 @@ import java.util.Random;
public class RestfulJDBCTest {
private static final String host = "127.0.0.1";
- private static Connection connection;
- private Random random = new Random(System.currentTimeMillis());
+ private final Random random = new Random(System.currentTimeMillis());
+ private Connection connection;
- /**
- * select * from log.log
- **/
@Test
public void testCase001() {
- try {
- Statement statement = connection.createStatement();
- ResultSet resultSet = statement.executeQuery("select * from log.log");
- ResultSetMetaData metaData = resultSet.getMetaData();
- while (resultSet.next()) {
- for (int i = 1; i <= metaData.getColumnCount(); i++) {
- String column = metaData.getColumnLabel(i);
- String value = resultSet.getString(i);
- System.out.print(column + ":" + value + "\t");
- }
- System.out.println();
- }
- statement.close();
- } catch (SQLException e) {
- e.printStackTrace();
- }
+ // given
+ String sql = "drop database if exists restful_test";
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertFalse(execute);
+
+ // given
+ sql = "create database if not exists restful_test";
+ // when
+ execute = execute(connection, sql);
+ // then
+ Assert.assertFalse(execute);
+
+ // given
+ sql = "use restful_test";
+ // when
+ execute = execute(connection, sql);
+ // then
+ Assert.assertFalse(execute);
}
- /**
- * create database
- */
@Test
public void testCase002() {
- try (Statement stmt = connection.createStatement()) {
- stmt.execute("drop database if exists restful_test");
- stmt.execute("create database if not exists restful_test");
- stmt.execute("use restful_test");
- } catch (SQLException e) {
- e.printStackTrace();
- }
+ // given
+ String sql = "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertFalse(execute);
}
- /**
- * create super table
- ***/
@Test
- public void testCase003() {
- try (Statement stmt = connection.createStatement()) {
- stmt.execute("create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)");
- } catch (SQLException e) {
- e.printStackTrace();
+ public void testCase004() {
+ for (int i = 1; i <= 100; i++) {
+ // given
+ String sql = "create table t" + i + " using weather tags('beijing', '" + i + "')";
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertFalse(execute);
}
}
@Test
- public void testCase004() {
- try (Statement stmt = connection.createStatement()) {
- for (int i = 1; i <= 100; i++) {
- stmt.execute("create table t" + i + " using weather tags('beijing', '" + i + "')");
+ public void testCase005() {
+ int rows = 0;
+ for (int i = 0; i < 10; i++) {
+ for (int j = 1; j <= 100; j++) {
+
+ // given
+ long currentTimeMillis = System.currentTimeMillis();
+ String sql = "insert into t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")";
+ // when
+ int affectRows = executeUpdate(connection, sql);
+ // then
+ Assert.assertEquals(1, affectRows);
+
+ rows += affectRows;
}
- } catch (SQLException e) {
- e.printStackTrace();
}
+ Assert.assertEquals(1000, rows);
}
+ @Test
+ public void testCase006() throws SQLException {
+ // given
+ String sql = "select * from weather";
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ ResultSetMetaData meta = rs.getMetaData();
+
+ // then
+ Assert.assertEquals(5, meta.getColumnCount());
+
+ while (rs.next()) {
+ Assert.assertNotNull(rs.getTimestamp("ts"));
+ Assert.assertNotNull(rs.getFloat("temperature"));
+ Assert.assertNotNull(rs.getInt("humidity"));
+ Assert.assertNotNull(rs.getString("location"));
+ }
+ }
@Test
- public void testCase005() {
+ public void testCase007() {
+ // given
+ String sql = "drop database restful_test";
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
+ }
+
+ private int executeUpdate(Connection connection, String sql) {
try (Statement stmt = connection.createStatement()) {
- int rows = 0;
- for (int i = 0; i < 10; i++) {
- for (int j = 1; j <= 100; j++) {
- long currentTimeMillis = System.currentTimeMillis();
- int affectRows = stmt.executeUpdate("insert into t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")");
- Assert.assertEquals(1, affectRows);
- rows += affectRows;
- }
- }
- Assert.assertEquals(1000, rows);
+ return stmt.executeUpdate(sql);
} catch (SQLException e) {
e.printStackTrace();
}
+ return 0;
}
- @Test
- public void testCase006() {
+ private boolean execute(Connection connection, String sql) {
try (Statement stmt = connection.createStatement()) {
- ResultSet rs = stmt.executeQuery("select * from weather");
- while (rs.next()) {
- System.out.print("ts: " + rs.getTimestamp("ts"));
- System.out.print(", temperature: " + rs.getString("temperature"));
- System.out.print(", humidity: " + rs.getString("humidity"));
- System.out.println(", location: " + rs.getString("location"));
- }
+ return stmt.execute(sql);
} catch (SQLException e) {
e.printStackTrace();
}
+ return false;
}
- @Test
- public void testCase007() {
- try (Statement stmt = connection.createStatement()) {
- ResultSet rs = stmt.executeQuery("select * from weather");
- ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- int columnCount = meta.getColumnCount();
- for (int i = 1; i <= columnCount; i++) {
- String columnLabel = meta.getColumnLabel(i);
- String value = rs.getString(i);
- System.out.print(columnLabel + ": " + value + "\t");
- }
- System.out.println();
- }
+
+ private ResultSet executeQuery(Connection connection, String sql) {
+ try (Statement statement = connection.createStatement()) {
+ return statement.executeQuery(sql);
} catch (SQLException e) {
e.printStackTrace();
}
+ return null;
}
- @BeforeClass
- public static void before() throws ClassNotFoundException, SQLException {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
- connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
+ @Before
+ public void before() {
+ try {
+ connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
}
- @AfterClass
- public static void after() throws SQLException {
- if (connection != null)
- connection.close();
+ @After
+ public void after() {
+ try {
+ if (connection != null)
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
}
}
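The refactored RestfulJDBCTest above leans on the java.sql.Statement.execute contract: the method returns true when the first result is a ResultSet (queries, SHOW commands) and false when it is an update count or nothing (DDL and inserts), which is why the create/use/insert cases assert false and the select cases assert true. A minimal sketch of that contract, where conn stands for any open Connection:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class ExecuteContractSketch {
    static void run(Connection conn, String sql) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            // true  -> the first result is a ResultSet (typical for SELECT/SHOW)
            // false -> the first result is an update count or nothing (DDL, INSERT, ...)
            if (stmt.execute(sql)) {
                try (ResultSet rs = stmt.getResultSet()) {
                    while (rs.next()) {
                        // consume rows while the Statement is still open
                    }
                }
            } else {
                System.out.println("update count: " + stmt.getUpdateCount());
            }
        }
    }
}

Note that a ResultSet is only guaranteed to remain usable while the Statement that produced it is open, which is why the sketch consumes the rows inside the try-with-resources block instead of returning the ResultSet.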
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
index ee457ff4127ccf3fe88cf277d581a3dcb3475df9..4760a723e4b4e662326987290c2c630803f8f470 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java
@@ -15,6 +15,8 @@ public class RestfulPreparedStatementTest {
private static PreparedStatement pstmt_insert;
private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?";
private static PreparedStatement pstmt_select;
+ private static final String sql_without_parameters = "select count(*) from t1";
+ private static PreparedStatement pstmt_without_parameters;
@Test
public void executeQuery() throws SQLException {
@@ -27,12 +29,9 @@ public class RestfulPreparedStatementTest {
ResultSet rs = pstmt_select.executeQuery();
Assert.assertNotNull(rs);
ResultSetMetaData meta = rs.getMetaData();
- while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
- }
+ int columnCount = meta.getColumnCount();
+ Assert.assertEquals(10, columnCount);
+ Assert.assertNotNull(rs);
}
@Test
@@ -240,6 +239,7 @@ public class RestfulPreparedStatementTest {
@Test
public void clearParameters() throws SQLException {
pstmt_insert.clearParameters();
+ pstmt_without_parameters.clearParameters();
}
@Test
@@ -373,18 +373,20 @@ public class RestfulPreparedStatementTest {
@BeforeClass
public static void beforeClass() {
try {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
- try (Statement stmt = conn.createStatement()) {
- stmt.execute("drop database if exists test_pstmt");
- stmt.execute("create database if not exists test_pstmt");
- stmt.execute("use test_pstmt");
- stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))");
- stmt.execute("create table t1 using weather tags('beijing')");
- }
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test_pstmt");
+ stmt.execute("create database if not exists test_pstmt");
+ stmt.execute("use test_pstmt");
+ stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))");
+ stmt.execute("create table t1 using weather tags('beijing')");
+ stmt.close();
+
pstmt_insert = conn.prepareStatement(sql_insert);
pstmt_select = conn.prepareStatement(sql_select);
- } catch (ClassNotFoundException | SQLException e) {
+ pstmt_without_parameters = conn.prepareStatement(sql_without_parameters);
+ } catch (SQLException e) {
e.printStackTrace();
}
}
@@ -396,6 +398,8 @@ public class RestfulPreparedStatementTest {
pstmt_insert.close();
if (pstmt_select != null)
pstmt_select.close();
+ if (pstmt_without_parameters != null)
+ pstmt_without_parameters.close();
if (conn != null)
conn.close();
} catch (SQLException e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java
index 1be32b502d3f8f7c1b94cd1a8940073520e11b12..a7b3ceb9d3bb243a2a053d5289afe39d3c870d79 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java
@@ -12,6 +12,7 @@ import java.util.UUID;
public class RestfulStatementTest {
private static final String host = "127.0.0.1";
+
private static Connection conn;
private static Statement stmt;
@@ -21,11 +22,11 @@ public class RestfulStatementTest {
ResultSet rs = stmt.executeQuery("show databases");
Assert.assertNotNull(rs);
ResultSetMetaData meta = rs.getMetaData();
+ int columnCount = meta.getColumnCount();
+ Assert.assertTrue(columnCount > 1);
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals("name", meta.getColumnLabel(1));
+ Assert.assertNotNull(rs.getString("name"));
}
rs.close();
} catch (SQLException e) {
@@ -174,10 +175,10 @@ public class RestfulStatementTest {
Assert.assertEquals(3, meta.getColumnCount());
int count = 0;
while (rs.next()) {
- for (int i = 1; i <= meta.getColumnCount(); i++) {
- System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
- }
- System.out.println();
+ Assert.assertEquals("ts", meta.getColumnLabel(1));
+ Assert.assertNotNull(rs.getTimestamp(1));
+ Assert.assertEquals("temperature", meta.getColumnLabel(2));
+ Assert.assertEquals(22.33, rs.getFloat(2), 0.001f);
count++;
}
Assert.assertEquals(1, count);
@@ -388,15 +389,12 @@ public class RestfulStatementTest {
@BeforeClass
public static void beforeClass() {
try {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", properties);
stmt = conn.createStatement();
- } catch (ClassNotFoundException e) {
- e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
index 4ad9826384a93e221b1181b72fa576bf72ebaff4..6b88de258dd4addda06cfb6e971b9d4dd267b7b4 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
@@ -1,342 +1,586 @@
package com.taosdata.jdbc.rs;
-import com.taosdata.jdbc.utils.SQLExecutor;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
+import org.junit.*;
import org.junit.runners.MethodSorters;
import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
+
private static final String host = "127.0.0.1";
private static Connection connection;
@Test
public void testCase001() {
+ // given
String sql = "create database if not exists restful_test";
- SQLExecutor.execute(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase002() {
+ // given
String sql = "use restful_test";
- SQLExecutor.execute(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase003() {
+ // given
String sql = "show databases";
- SQLExecutor.executeWithResult(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase004() {
+ // given
String sql = "show tables";
- SQLExecutor.executeWithResult(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase005() {
+ // given
String sql = "show stables";
- SQLExecutor.executeWithResult(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase006() {
+ // given
String sql = "show dnodes";
- SQLExecutor.executeWithResult(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase007() {
+ // given
String sql = "show vgroups";
- SQLExecutor.executeWithResult(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase008() {
+ // given
String sql = "drop table if exists restful_test.weather";
- SQLExecutor.execute(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase009() {
+ // given
String sql = "create table if not exists restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))";
- SQLExecutor.execute(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase010() {
+ // given
String sql = "create table t1 using restful_test.weather tags('北京')";
- SQLExecutor.execute(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase011() {
+ // given
String sql = "insert into restful_test.t1 values(now, 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase012() {
+ // given
String sql = "insert into restful_test.t1 values('2020-01-01 00:00:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase013() {
+ // given
String sql = "insert into restful_test.t1 values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase014() {
+ // given
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:03:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase015() {
+ // given
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase016() {
+ // given
String sql = "insert into t1 values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t2 values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase017() {
+ // given
String sql = "Insert into t3 using weather tags('广东') values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t4 using weather tags('天津') values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ boolean execute = execute(connection, sql);
+
+ // then
+ Assert.assertFalse(execute);
}
@Test
public void testCase018() {
+ // given
String sql = "select * from restful_test.t1";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase019() {
+ // given
String sql = "select * from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase020() {
+ // given
String sql = "select ts, temperature from restful_test.t1";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase021() {
+ // given
String sql = "select ts, temperature from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase022() {
+ // given
String sql = "select temperature, ts from restful_test.t1";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase023() {
+ // given
String sql = "select temperature, ts from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ boolean execute = execute(connection, sql);
+ // then
+ Assert.assertTrue(execute);
}
@Test
public void testCase024() {
+ // given
String sql = "import into restful_test.t5 using weather tags('石家庄') values('2020-01-01 00:01:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+ // when
+ int affectedRows = executeUpdate(connection, sql);
+ // then
+ Assert.assertEquals(1, affectedRows);
}
@Test
public void testCase025() {
+ // given
String sql = "import into restful_test.t6 using weather tags('沈阳') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+ // when
+ int affectedRows = executeUpdate(connection, sql);
+ // then
+ Assert.assertEquals(2, affectedRows);
}
@Test
public void testCase026() {
+ // given
String sql = "import into restful_test.t7 using weather tags('长沙') values('2020-01-01 00:01:00.000', 22.22) restful_test.t8 using weather tags('吉林') values('2020-01-01 00:01:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+
+ // when
+ int affectedRows = executeUpdate(connection, sql);
+ // then
+ Assert.assertEquals(2, affectedRows);
}
@Test
public void testCase027() {
+ // given
String sql = "import into restful_test.t9 using weather tags('武汉') values('2020-01-01 00:01:00.000', 22.22) ,('2020-01-02 00:01:00.000', 22.22) restful_test.t10 using weather tags('哈尔滨') values('2020-01-01 00:01:00.000', 22.22),('2020-01-02 00:01:00.000', 22.22)";
- SQLExecutor.executeUpdate(connection, sql);
+ // when
+ int affectedRows = executeUpdate(connection, sql);
+ // then
+ Assert.assertEquals(4, affectedRows);
}
@Test
public void testCase028() {
+ // given
String sql = "select location, temperature, ts from restful_test.weather where temperature > 1";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase029() {
String sql = "select location, temperature, ts from restful_test.weather where temperature < 1";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase030() {
String sql = "select location, temperature, ts from restful_test.weather where ts > now";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase031() {
String sql = "select location, temperature, ts from restful_test.weather where ts < now";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase032() {
String sql = "select count(*) from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase033() {
String sql = "select first(*) from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase034() {
String sql = "select last(*) from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase035() {
String sql = "select last_row(*) from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase036() {
String sql = "select ts, ts as primary_key from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase037() {
String sql = "select database()";
- SQLExecutor.execute(connection, "use restful_test");
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase038() {
String sql = "select client_version()";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase039() {
String sql = "select server_status()";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase040() {
String sql = "select server_status() as status";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase041() {
String sql = "select tbname, location from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase042() {
String sql = "select count(tbname) from restful_test.weather";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase043() {
String sql = "select * from restful_test.weather where ts < now - 1h";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase044() {
String sql = "select * from restful_test.weather where ts < now - 1h and location like '%'";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase045() {
String sql = "select * from restful_test.weather where ts < now - 1h order by ts";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase046() {
String sql = "select last(*) from restful_test.weather where ts < now - 1h group by tbname order by tbname";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase047() {
String sql = "select * from restful_test.weather limit 2";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase048() {
String sql = "select * from restful_test.weather limit 2 offset 5";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase049() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts ";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase050() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase051() {
String sql = "select * from restful_test.t1 tt, restful_test.t3 yy where tt.ts = yy.ts";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase052() {
String sql = "select server_status()";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
}
@Test
public void testCase053() {
String sql = "select avg(cpu_taosd), avg(cpu_system), max(cpu_cores), avg(mem_taosd), avg(mem_system), max(mem_total), avg(disk_used), max(disk_total), avg(band_speed), avg(io_read), avg(io_write), sum(req_http), sum(req_select), sum(req_insert) from log.dn1 where ts> now - 60m and ts<= now interval(1m) fill(value, 0)";
- SQLExecutor.executeQuery(connection, sql);
+ // when
+ ResultSet rs = executeQuery(connection, sql);
+ // then
+ Assert.assertNotNull(rs);
+ }
+
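+ // Minimal JDBC helpers for this test class: try-with-resources closes the Statement,
+ // SQLExceptions are logged, and a neutral fallback (false / null / 0) is returned.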
+ private boolean execute(Connection connection, String sql) {
+ try (Statement statement = connection.createStatement()) {
+ return statement.execute(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ return false;
+ }
+
+ private ResultSet executeQuery(Connection connection, String sql) {
+ try (Statement statement = connection.createStatement()) {
+ return statement.executeQuery(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ private int executeUpdate(Connection connection, String sql) {
+ try (Statement statement = connection.createStatement()) {
+ return statement.executeUpdate(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ return 0;
}
@BeforeClass
- public static void before() throws ClassNotFoundException, SQLException {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+ public static void before() throws SQLException {
connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java
deleted file mode 100644
index bf034bf458bcb8eadaaacb5cf633f0905a8c1bd6..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java
+++ /dev/null
@@ -1,74 +0,0 @@
-package com.taosdata.jdbc.utils;
-
-import java.sql.*;
-
-public class SQLExecutor {
-
- // insert, import
- public static void executeUpdate(Connection connection, String sql) {
- try (Statement statement = connection.createStatement()) {
- long start = System.currentTimeMillis();
- int affectedRows = statement.executeUpdate(sql);
- long end = System.currentTimeMillis();
- System.out.println("[ affected rows : " + affectedRows + " ] time cost: " + (end - start) + " ms, execute statement ====> " + sql);
- } catch (SQLException e) {
- e.printStackTrace();
- }
- }
-
- // show databases, show tables, show stables
- public static void executeWithResult(Connection connection, String sql) {
- try (Statement statement = connection.createStatement()) {
- statement.execute(sql);
- ResultSet resultSet = statement.getResultSet();
- printResult(resultSet);
- } catch (SQLException e) {
- e.printStackTrace();
- }
- }
-
- // use database, create database, create table, drop table...
- public static void execute(Connection connection, String sql) {
- try (Statement statement = connection.createStatement()) {
- long start = System.currentTimeMillis();
- boolean execute = statement.execute(sql);
- long end = System.currentTimeMillis();
- printSql(sql, execute, (end - start));
- } catch (SQLException e) {
- System.out.println("ERROR execute SQL ===> " + sql);
- e.printStackTrace();
- }
- }
-
- // select
- public static void executeQuery(Connection connection, String sql) {
- try (Statement statement = connection.createStatement()) {
- long start = System.currentTimeMillis();
- ResultSet resultSet = statement.executeQuery(sql);
- long end = System.currentTimeMillis();
- printSql(sql, true, (end - start));
- printResult(resultSet);
- } catch (SQLException e) {
- System.out.println("ERROR execute SQL ===> " + sql);
- e.printStackTrace();
- }
- }
-
- private static void printSql(String sql, boolean succeed, long cost) {
- System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
- }
-
- private static void printResult(ResultSet resultSet) throws SQLException {
- ResultSetMetaData metaData = resultSet.getMetaData();
- while (resultSet.next()) {
- StringBuilder sb = new StringBuilder();
- for (int i = 1; i <= metaData.getColumnCount(); i++) {
- String columnLabel = metaData.getColumnLabel(i);
- String value = resultSet.getString(i);
- sb.append(columnLabel + ": " + value + "\t");
- }
- System.out.println(sb.toString());
- }
- }
-
-}
diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js
index f3961e3787c4fb6d7da7092b68632d08a8b57e20..03d27e5593ccb15d8ff47cd3c3dedba765d14fc1 100644
--- a/src/connector/nodejs/nodetaos/cinterface.js
+++ b/src/connector/nodejs/nodetaos/cinterface.js
@@ -15,36 +15,18 @@ const { NULL_POINTER } = require('ref-napi');
module.exports = CTaosInterface;
-function convertMillisecondsToDatetime(time) {
- return new TaosObjects.TaosTimestamp(time);
-}
-function convertMicrosecondsToDatetime(time) {
- return new TaosObjects.TaosTimestamp(time * 0.001, true);
-}
-
-function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
- timestampConverter = convertMillisecondsToDatetime;
- if (micro == true) {
- timestampConverter = convertMicrosecondsToDatetime;
- }
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
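+ // precision comes from taos_result_precision(): 0 = milliseconds, 1 = microseconds, 2 = nanoseconds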
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
- let queue = [];
- let time = 0;
- for (let i = currOffset; i < currOffset + nbytes; i++) {
- queue.push(data[i]);
- }
- for (let i = queue.length - 1; i >= 0; i--) {
- time += queue[i] * Math.pow(16, i * 2);
- }
+ let time = data.readInt64LE(currOffset);
currOffset += nbytes;
- res.push(timestampConverter(time));
+ res.push(new TaosObjects.TaosTimestamp(time, precision));
}
return res;
}
-function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = new Array(data.length);
for (let i = 0; i < data.length; i++) {
@@ -60,7 +42,7 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
}
return res;
}
-function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -71,7 +53,7 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false
}
return res;
}
-function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -82,7 +64,7 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = fals
}
return res;
}
-function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -93,7 +75,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
}
return res;
}
-function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -104,7 +86,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -115,7 +97,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -126,30 +108,20 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+
+function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
+
let currOffset = 0;
while (currOffset < data.length) {
- let dataEntry = data.slice(currOffset, currOffset + nbytes);
- if (dataEntry[0] == FieldTypes.C_BINARY_NULL) {
- res.push(null);
- }
- else {
- res.push(ref.readCString(dataEntry));
- }
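+ // each cell is length-prefixed: a 2-byte little-endian length followed by the UTF-8 payload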
+ let len = data.readIntLE(currOffset, 2);
+ let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
+ res.push(dataEntry.toString("utf-8"));
currOffset += nbytes;
}
return res;
}
-function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
- data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
- let res = [];
- let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
- //TODO: should use the correct character encoding
- res.push(dataEntry.toString("utf-8"));
- return res;
-}
// Object with all the relevant converters from pblock data to javascript readable data
let convertFunctions = {
@@ -160,7 +132,7 @@ let convertFunctions = {
[FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE]: convertDouble,
- [FieldTypes.C_BINARY]: convertBinary,
+ [FieldTypes.C_BINARY]: convertNchar,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR]: convertNchar
}
@@ -282,7 +254,7 @@ CTaosInterface.prototype.config = function config() {
CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
let _host, _user, _password, _db, _port;
try {
- _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ _host = host != null ? ref.allocCString(host) : ref.NULL;
}
catch (err) {
throw "Attribute Error: host is expected as a str";
@@ -300,7 +272,7 @@ CTaosInterface.prototype.connect = function connect(host = null, user = "root",
throw "Attribute Error: password is expected as a str";
}
try {
- _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ _db = db != null ? ref.allocCString(db) : ref.NULL;
}
catch (err) {
throw "Attribute Error: db is expected as a str";
@@ -355,8 +327,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
}
var fieldL = this.libtaos.taos_fetch_lengths(result);
-
- let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = this.libtaos.taos_result_precision(result);
var fieldlens = [];
@@ -383,7 +354,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
- blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision);
}
}
return { blocks: blocks, num_of_rows }
@@ -433,7 +404,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
let row = cti.libtaos.taos_fetch_row(result2);
let fields = cti.fetchFields_a(result2);
- let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = cti.libtaos.taos_result_precision(result2);
let blocks = new Array(fields.length);
blocks.fill(null);
numOfRows2 = Math.abs(numOfRows2);
@@ -459,7 +430,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
let prow = ref.reinterpret(row, 8, i * 8);
prow = prow.readPointer();
prow = ref.ref(prow);
- blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision);
//offset += fields[i]['bytes'] * numOfRows2;
}
}
@@ -582,7 +553,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
var cti = this;
let asyncCallbackWrapper = function (param2, result2, row) {
let fields = cti.fetchFields_a(result2);
- let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = cti.libtaos.taos_result_precision(result2);
let blocks = new Array(fields.length);
blocks.fill(null);
let numOfRows2 = 1;
@@ -592,7 +563,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
- blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision);
offset += fields[i]['bytes'] * numOfRows2;
}
}
diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js
index 809d17a016ac5aafc504c71f6417858e9d00821b..0fc8dc8ef1a057c7e410956a2b68072e65cbb613 100644
--- a/src/connector/nodejs/nodetaos/taosobjects.js
+++ b/src/connector/nodejs/nodetaos/taosobjects.js
@@ -1,5 +1,5 @@
const FieldTypes = require('./constants');
-
+const util = require('util');
/**
* Various objects such as TaosRow and TaosColumn that help make parsing data easier
* @module TaosObjects
@@ -14,7 +14,7 @@ const FieldTypes = require('./constants');
* var trow = new TaosRow(row);
* console.log(trow.data);
*/
-function TaosRow (row) {
+function TaosRow(row) {
this.data = row;
this.length = row.length;
return this;
@@ -29,10 +29,10 @@ function TaosRow (row) {
*/
function TaosField(field) {
- this._field = field;
- this.name = field.name;
- this.type = FieldTypes.getType(field.type);
- return this;
+ this._field = field;
+ this.name = field.name;
+ this.type = FieldTypes.getType(field.type);
+ return this;
}
/**
@@ -42,39 +42,110 @@ function TaosField(field) {
* @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000
*/
class TaosTimestamp extends Date {
- constructor(date, micro = false) {
- super(date);
- this._type = 'TaosTimestamp';
- if (micro) {
- this.microTime = date - Math.floor(date);
+ constructor(date, precision = 0) {
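+ // precision: 0 = milliseconds (default), 1 = microseconds, 2 = nanoseconds;
+ // the sub-millisecond remainder is kept in this.precisionExtras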
+ if (precision === 1) {
+ super(Math.floor(date / 1000));
+ this.precisionExtras = date % 1000;
+ } else if (precision === 2) {
+ super(parseInt(date / 1000000));
+ // use BigInt to avoid float precision loss: 1625801548423914405 % 1000000 yields 914496 instead of the expected 914405
+ this.precisionExtras = parseInt(BigInt(date) % 1000000n);
+ } else {
+ super(parseInt(date));
+ }
+ this.precision = precision;
+ }
+
+ /**
+ * TDengine raw timestamp.
+ * @returns raw taos timestamp (int64)
+ */
+ taosTimestamp() {
+ if (this.precision == 1) {
+ return (this * 1000 + this.precisionExtras);
+ } else if (this.precision == 2) {
+ return (this * 1000000 + this.precisionExtras);
+ } else {
+ return Math.floor(this);
+ }
+ }
+
+ /**
+ * Gets the microseconds of a Date.
+ * @return {Int} A microseconds integer
+ */
+ getMicroseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras / 1000;
+ } else {
+ return 0;
+ }
+ }
+ /**
+ * Gets the nanoseconds of a TaosTimestamp.
+ * @return {Int} A nanoseconds integer
+ */
+ getNanoseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras;
+ } else {
+ return 0;
+ }
+ }
+
+ /**
+ * @returns {String} a string for timestamp string format
+ */
+ _precisionExtra() {
+ if (this.precision == 1) {
+ return String(this.precisionExtras).padStart(3, '0');
+ } else if (this.precision == 2) {
+ return String(this.precisionExtras).padStart(6, '0');
+ } else {
+ return '';
}
}
/**
* @function Returns the date into a string usable by TDengine
* @return {string} A Taos Timestamp String
*/
- toTaosString(){
+ toTaosString() {
var tzo = -this.getTimezoneOffset(),
- dif = tzo >= 0 ? '+' : '-',
- pad = function(num) {
- var norm = Math.floor(Math.abs(num));
- return (norm < 10 ? '0' : '') + norm;
- },
- pad2 = function(num) {
- var norm = Math.floor(Math.abs(num));
- if (norm < 10) return '00' + norm;
- if (norm < 100) return '0' + norm;
- if (norm < 1000) return norm;
- };
+ dif = tzo >= 0 ? '+' : '-',
+ pad = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ return (norm < 10 ? '0' : '') + norm;
+ },
+ pad2 = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ if (norm < 10) return '00' + norm;
+ if (norm < 100) return '0' + norm;
+ if (norm < 1000) return norm;
+ };
return this.getFullYear() +
- '-' + pad(this.getMonth() + 1) +
- '-' + pad(this.getDate()) +
- ' ' + pad(this.getHours()) +
- ':' + pad(this.getMinutes()) +
- ':' + pad(this.getSeconds()) +
- '.' + pad2(this.getMilliseconds()) +
- '' + (this.microTime ? pad2(Math.round(this.microTime * 1000)) : '');
+ '-' + pad(this.getMonth() + 1) +
+ '-' + pad(this.getDate()) +
+ ' ' + pad(this.getHours()) +
+ ':' + pad(this.getMinutes()) +
+ ':' + pad(this.getSeconds()) +
+ '.' + pad2(this.getMilliseconds()) +
+ '' + this._precisionExtra();
+ }
+
+ /**
+ * Custom console.log
+ * @returns {String} string format for debug
+ */
+ [util.inspect.custom](depth, opts) {
+ return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts);
+ }
+ toString() {
+ return this.toTaosString();
}
}
-module.exports = {TaosRow, TaosField, TaosTimestamp}
+module.exports = { TaosRow, TaosField, TaosTimestamp }
diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json
index d21b62108b14e5a132ad5457d190bbcbc58b73a8..db37318a164c6207432ebb64defb608381d2cb49 100644
--- a/src/connector/nodejs/package.json
+++ b/src/connector/nodejs/package.json
@@ -1,13 +1,13 @@
{
"name": "td2.0-connector",
- "version": "2.0.7",
+ "version": "2.0.9",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {
"test": "test"
},
"scripts": {
- "test": "node test/test.js"
+ "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js"
},
"repository": {
"type": "git",
diff --git a/src/connector/nodejs/tdengine.js b/src/connector/nodejs/tdengine.js
index aa296279d5e20f3d049d478ea2af44ea47a2b8e3..047c744a4fc90c6306e851eaa529a7f9f578fe12 100644
--- a/src/connector/nodejs/tdengine.js
+++ b/src/connector/nodejs/tdengine.js
@@ -1,4 +1,4 @@
var TDengineConnection = require('./nodetaos/connection.js')
-module.exports.connect = function (connection=null) {
+module.exports.connect = function (connection={}) {
return new TDengineConnection(connection);
}
diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js
index bf4bb2c54188d3eb0f9c7fb5306912effc7b0760..caf05955da4c960ebedc872f400c17d18be767dd 100644
--- a/src/connector/nodejs/test/test.js
+++ b/src/connector/nodejs/test/test.js
@@ -1,5 +1,5 @@
const taos = require('../tdengine');
-var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
+var conn = taos.connect();
var c1 = conn.cursor();
let stime = new Date();
let interval = 1000;
diff --git a/src/connector/nodejs/test/testMicroseconds.js b/src/connector/nodejs/test/testMicroseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f
--- /dev/null
+++ b/src/connector/nodejs/test/testMicroseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_us';
+c1.execute('create database if not exists ' + dbname + ' precision "us"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
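+// 1625801548423914 us == 1625801548423 ms plus 914 us beyond the millisecond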
+if (ts.taosTimestamp() != 1625801548423914) {
+ throw "microseconds not match!";
+}
+if (ts.getMicroseconds() % 1000 !== 914) {
+ throw "micronsecond precision error";
+}
+setTimeout(function () {
+ c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/src/connector/nodejs/test/testNanoseconds.js b/src/connector/nodejs/test/testNanoseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea
--- /dev/null
+++ b/src/connector/nodejs/test/testNanoseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_ns';
+c1.execute('create database if not exists ' + dbname + ' precision "ns"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914405, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
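+// 1625801548423914405 ns == 1625801548423 ms plus 914405 ns beyond the millisecond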
+if (ts.taosTimestamp() != 1625801548423914405) {
+ throw "nanosecond not match!";
+}
+if (ts.getNanoseconds() % 1000000 !== 914405) {
+ throw "nanosecond precision error";
+}
+setTimeout(function () {
+ c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt
index 5a93ac3f7e2934fd8383c5a18f22c24845793f1a..87746f23ae3796f4d0ab20257f90599860430568 100644
--- a/src/connector/odbc/CMakeLists.txt
+++ b/src/connector/odbc/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX_64)
@@ -20,8 +20,8 @@ IF (TD_LINUX_64)
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0)
message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case")
else ()
- SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion")
- SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion")
+ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ")
+ SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ")
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tools)
ADD_SUBDIRECTORY(examples)
diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt
index f0e50415e2e4f14e1c247b834e1e52a2c2fd2868..e990647e1aadcafb8b3306ee7e43a4d3ac285c94 100644
--- a/src/connector/odbc/src/CMakeLists.txt
+++ b/src/connector/odbc/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
add_subdirectory(base)
diff --git a/src/connector/odbc/src/base/CMakeLists.txt b/src/connector/odbc/src/base/CMakeLists.txt
index fa13f3e07737bb6c2a36e5296a49a9f282346e3b..e34091360900a3a856d9fe56bb9fec994f4ba321 100644
--- a/src/connector/odbc/src/base/CMakeLists.txt
+++ b/src/connector/odbc/src/base/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
aux_source_directory(. SRC)
diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt
index e9ed2996c74e2c59d56245e6fc1e932ebb07dfb0..f01ccb8728eb9a2a4695a8a0c133422e3134b8e2 100644
--- a/src/cq/CMakeLists.txt
+++ b/src/cq/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/cq/test/CMakeLists.txt b/src/cq/test/CMakeLists.txt
index cd124567afd8766173cf07e7a6191ab473be1714..d713dd7401c4f2d791ee0b4de1216b6ede558507 100644
--- a/src/cq/test/CMakeLists.txt
+++ b/src/cq/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
LIST(APPEND CQTEST_SRC ./cqtest.c)
diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt
index f8d8f88438429f1c0be405825cef4ab9c1b130bc..e7ac1be5b1160df447271eeec78e2939923b6d53 100644
--- a/src/dnode/CMakeLists.txt
+++ b/src/dnode/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index c80e1059b40b08b9eb592dc150974bef9746a1a9..2f83e5f6dc96f660162fdbda7fea034658b8cab7 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -117,7 +117,14 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
if (pMsg->pCont == NULL) return;
- if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) {
+ SRunStatus dnodeStatus = dnodeGetRunStatus();
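+ // a stopping dnode replies with a dedicated error code instead of the generic "not ready"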
+ if (dnodeStatus == TSDB_RUN_STATUS_STOPPED) {
+ dError("RPC %p, shell msg:%s is ignored since dnode exiting", pMsg->handle, taosMsg[pMsg->msgType]);
+ rpcMsg.code = TSDB_CODE_DND_EXITING;
+ rpcSendResponse(&rpcMsg);
+ rpcFreeCont(pMsg->pCont);
+ return;
+ } else if (dnodeStatus != TSDB_RUN_STATUS_RUNING) {
dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
rpcMsg.code = TSDB_CODE_APP_NOT_READY;
rpcSendResponse(&rpcMsg);
diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c
index 90bae8b9dd73ca80efab8090d5a189cf8c1a8966..daf62aac94a5e10a5899ad9c8593b5ff7df86f46 100644
--- a/src/dnode/src/dnodeVMgmt.c
+++ b/src/dnode/src/dnodeVMgmt.c
@@ -170,7 +170,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg);
- void *pVnode = vnodeAcquire(pAlter->cfg.vgId);
+ void *pVnode = vnodeAcquireNotClose(pAlter->cfg.vgId);
if (pVnode != NULL) {
dDebug("vgId:%d, alter vnode msg is received", pAlter->cfg.vgId);
int32_t code = vnodeAlter(pVnode, pAlter);
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index ea738661ce2813e13468ad91b4dc1d54775db21f..41016d7b99d049922e4de7dc0cbd3dafd2bc4ebf 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -63,7 +63,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
pHead->contLen = htonl(pHead->contLen);
assert(pHead->contLen > 0);
- void *pVnode = vnodeAcquire(pHead->vgId);
+ void *pVnode = vnodeAcquireNotClose(pHead->vgId);
if (pVnode != NULL) {
code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
if (code == TSDB_CODE_SUCCESS) queuedMsgNum++;
diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c
index 26084a52eb1806c4fdce592d47471d92ec3e1cdb..bbf257ff953779fd9d097ba82e1b42c0b91d1531 100644
--- a/src/dnode/src/dnodeVWrite.c
+++ b/src/dnode/src/dnodeVWrite.c
@@ -85,7 +85,7 @@ void dnodeDispatchToVWriteQueue(SRpcMsg *pRpcMsg) {
pMsg->vgId = htonl(pMsg->vgId);
pMsg->contLen = htonl(pMsg->contLen);
- void *pVnode = vnodeAcquire(pMsg->vgId);
+ void *pVnode = vnodeAcquireNotClose(pMsg->vgId);
if (pVnode == NULL) {
code = TSDB_CODE_VND_INVALID_VGROUP_ID;
} else {
diff --git a/src/inc/taos.h b/src/inc/taos.h
index 9f72945ef03f28fb54ab05f84be810a0f9d5a66a..a62f38792499994ebb54567c43ecddec829de368 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -169,6 +169,8 @@ DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr);
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
+DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 62136020324bddb4fa98e05829bf851c64e85eb2..61b2c740117cc47efbc6b4e9498313272512d7f4 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -82,6 +82,8 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
+#elif (_TD_TQ_ == true)
+#define TSDB_DEFAULT_PASS "tqueue"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index f73ae9e543908cd88a2e01c025329cb1d968408e..835969a03fde5b61fab6731cbc475312dd911b69 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -198,6 +198,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length")
#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress")
#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories")
+#define TSDB_CODE_DND_EXITING TAOS_DEF_ERROR_CODE(0, 0x0406) //"Dnode is exiting"
// vnode
#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 54b5f16fca5c28d8b45e7227a33b9169a7d9720d..e1a63cc860205afdffdca9444262dd82d9e86065 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -402,7 +402,7 @@ typedef struct SColIndex {
int16_t colId; // column id
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
uint16_t flag; // denote if it is a tag or a normal column
- char name[TSDB_COL_NAME_LEN]; // TODO remove it
+ char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1];
} SColIndex;
typedef struct SColumnFilterInfo {
diff --git a/src/inc/tbn.h b/src/inc/tbn.h
index b9f4e3c608a1ae3df3a4ea0dca32c7bf9d5820a9..b35f90eb153d02b4bb09f6f96a8dd09835626c97 100644
--- a/src/inc/tbn.h
+++ b/src/inc/tbn.h
@@ -31,6 +31,7 @@ void bnReset();
int32_t bnAllocVnodes(struct SVgObj *pVgroup);
int32_t bnAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
int32_t bnDropDnode(struct SDnodeObj *pDnode);
+int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode);
#ifdef __cplusplus
}
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 9dae862344b90580d36fc9fbba67a27cf60edc50..f31a5e36e8ba95ec12e9166471c1edd7098e58ce 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -69,6 +69,7 @@ int32_t vnodeInitMgmt();
void vnodeCleanupMgmt();
void* vnodeAcquire(int32_t vgId);
void vnodeRelease(void *pVnode);
+void* vnodeAcquireNotClose(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
void vnodeBuildStatusMsg(void *pStatus);
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 66e8cf73988ab25db7544b9a52215d2279630c63..7053052007c5e00a5ac001d72b64029dc08ddf8b 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index d9049454352efbd9344eae3c776f10c8f37fe090..794ca5e2de1820035524cc4180558b9f290c22c6 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
diff --git a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h
index 3094bdb9ddb1ccd9debdbca88a34197385deb367..a08c1f48d11a8cd0e51fa5fb2d05a16da96d38c9 100644
--- a/src/kit/shell/inc/shellCommand.h
+++ b/src/kit/shell/inc/shellCommand.h
@@ -45,7 +45,7 @@ extern void updateBuffer(Command *cmd);
extern int isReadyGo(Command *cmd);
extern void resetCommand(Command *cmd, const char s[]);
-int countPrefixOnes(char c);
+int countPrefixOnes(unsigned char c);
void clearScreen(int ecmd_pos, int cursor_pos);
void printChar(char c, int times);
void positionCursor(int step, int direction);
diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c
index 9173ab0efdae7e5900218b2ab256993df71b21dd..e1a3dfe10205715d8c5cf8677a5be60c1a478b05 100644
--- a/src/kit/shell/src/shellCommand.c
+++ b/src/kit/shell/src/shellCommand.c
@@ -26,7 +26,7 @@ typedef struct {
char widthOnScreen;
} UTFCodeInfo;
-int countPrefixOnes(char c) {
+int countPrefixOnes(unsigned char c) {
unsigned char mask = 127;
mask = ~mask;
int ret = 0;
@@ -48,7 +48,7 @@ void getPrevCharSize(const char *str, int pos, int *size, int *width) {
while (--pos >= 0) {
*size += 1;
- if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break;
+ if (str[pos] > 0 || countPrefixOnes((unsigned char )str[pos]) > 1) break;
}
int rc = mbtowc(&wc, str + pos, MB_CUR_MAX);
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index f6cb135dd11b20ff2b04b6b56e48c878b993b7e0..58f4b7ff02b673288878aa44671ba2a544556cc5 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -37,6 +37,13 @@ char PROMPT_HEADER[] = "power> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
+#elif (_TD_TQ_ == true)
+char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "tq> ";
+
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 4;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index 091eecfe270a20987a48f7223f57d2400169ac6d..584de340947035457abd985ac93697ed51c305af 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 38f0ecb219da9535a0549a4398e0a3ba30539598..9c547ff755b9543ffb8ac01969b12b311cbd8473 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -75,7 +75,7 @@ enum TEST_MODE {
#define MAX_RECORDS_PER_REQ 32766
-#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
+#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
@@ -84,26 +84,23 @@ enum TEST_MODE {
#define MAX_PASSWORD_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
-#define MAX_NUM_DATATYPE 10
+#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
-#define MAX_NUM_DATATYPE 10
+#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
-#define MAX_DB_COUNT 8
-#define MAX_SUPER_TABLE_COUNT 200
-#define MAX_COLUMN_COUNT 1024
-#define MAX_TAG_COUNT 128
+#define MAX_DB_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 200
-#define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH 1024
+#define MAX_QUERY_SQL_COUNT 100
+#define MAX_QUERY_SQL_LENGTH 1024
-#define MAX_DATABASE_COUNT 256
-#define INPUT_BUF_LEN 256
+#define MAX_DATABASE_COUNT 256
+#define INPUT_BUF_LEN 256
#define DEFAULT_TIMESTAMP_STEP 1
@@ -198,44 +195,45 @@ enum _describe_table_index {
static char *g_dupstr = NULL;
typedef struct SArguments_S {
- char * metaFile;
- uint32_t test_mode;
- char * host;
- uint16_t port;
- uint16_t iface;
- char * user;
- char * password;
- char * database;
- int replica;
- char * tb_prefix;
- char * sqlFile;
- bool use_metric;
- bool drop_database;
- bool insert_only;
- bool answer_yes;
- bool debug_print;
- bool verbose_print;
- bool performance_print;
- char * output_file;
- bool async_mode;
- char * datatype[MAX_NUM_DATATYPE + 1];
- uint32_t len_of_binary;
- uint32_t num_of_CPR;
- uint32_t num_of_threads;
- uint64_t insert_interval;
- int64_t query_times;
- uint32_t interlace_rows;
- uint32_t num_of_RPR; // num_of_records_per_req
- uint64_t max_sql_len;
- int64_t num_of_tables;
- int64_t num_of_DPT;
- int abort;
- uint32_t disorderRatio; // 0: no disorder, >0: x%
- int disorderRange; // ms or us by database precision
- uint32_t method_of_delete;
- char ** arg_list;
- uint64_t totalInsertRows;
- uint64_t totalAffectedRows;
+ char * metaFile;
+ uint32_t test_mode;
+ char * host;
+ uint16_t port;
+ uint16_t iface;
+ char * user;
+ char * password;
+ char * database;
+ int replica;
+ char * tb_prefix;
+ char * sqlFile;
+ bool use_metric;
+ bool drop_database;
+ bool insert_only;
+ bool answer_yes;
+ bool debug_print;
+ bool verbose_print;
+ bool performance_print;
+ char * output_file;
+ bool async_mode;
+ char * datatype[MAX_NUM_COLUMNS + 1];
+ uint32_t len_of_binary;
+ uint32_t num_of_CPR;
+ uint32_t num_of_threads;
+ uint64_t insert_interval;
+ int64_t query_times;
+ uint32_t interlace_rows;
+ uint32_t num_of_RPR; // num_of_records_per_req
+ uint64_t max_sql_len;
+ int64_t num_of_tables;
+ int64_t num_of_DPT;
+ int abort;
+ uint32_t disorderRatio; // 0: no disorder, >0: x%
+ int disorderRange; // ms or us by database precision
+ uint32_t method_of_delete;
+ char ** arg_list;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+ bool demo_mode; // use default column name and semi-random data
} SArguments;
typedef struct SColumn_S {
@@ -273,9 +271,9 @@ typedef struct SSuperTable_S {
char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
- StrColumn columns[MAX_COLUMN_COUNT];
+ StrColumn columns[TSDB_MAX_COLUMNS];
uint32_t tagCount;
- StrColumn tags[MAX_TAG_COUNT];
+ StrColumn tags[TSDB_MAX_TAGS];
char* childTblName;
char* colsOfCreateChildTable;
@@ -427,47 +425,47 @@ typedef struct SQueryMetaInfo_S {
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
- TAOS * taos;
- TAOS_STMT *stmt;
- int threadID;
- char db_name[TSDB_DB_NAME_LEN];
- uint32_t time_precision;
- char filePath[4096];
- FILE *fp;
- char tb_prefix[TSDB_TABLE_NAME_LEN];
- uint64_t start_table_from;
- uint64_t end_table_to;
- int64_t ntables;
- uint64_t data_of_rate;
- int64_t start_time;
- char* cols;
- bool use_metric;
- SSuperTable* superTblInfo;
- char *buffer; // sql cmd buffer
-
- // for async insert
- tsem_t lock_sem;
- int64_t counter;
- uint64_t st;
- uint64_t et;
- uint64_t lastTs;
-
- // sample data
- int64_t samplePos;
- // statistics
- uint64_t totalInsertRows;
- uint64_t totalAffectedRows;
-
- // insert delay statistics
- uint64_t cntDelay;
- uint64_t totalDelay;
- uint64_t avgDelay;
- uint64_t maxDelay;
- uint64_t minDelay;
-
- // seq of query or subscribe
- uint64_t querySeq; // sequence number of sql command
- TAOS_SUB* tsub;
+ TAOS * taos;
+ TAOS_STMT *stmt;
+ int threadID;
+ char db_name[TSDB_DB_NAME_LEN];
+ uint32_t time_precision;
+ char filePath[4096];
+ FILE *fp;
+ char tb_prefix[TSDB_TABLE_NAME_LEN];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ int64_t ntables;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
+ SSuperTable* superTblInfo;
+ char *buffer; // sql cmd buffer
+
+ // for async insert
+ tsem_t lock_sem;
+ int64_t counter;
+ uint64_t st;
+ uint64_t et;
+ uint64_t lastTs;
+
+ // sample data
+ int64_t samplePos;
+ // statistics
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+
+ // insert delay statistics
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
+
+ // seq of query or subscribe
+ uint64_t querySeq; // sequence number of sql command
+ TAOS_SUB* tsub;
} threadInfo;
@@ -552,6 +550,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr,
uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
int disorderRatio, int disorderRange);
+static bool getInfoFromJsonFile(char* file);
+static void init_rand_data();
/* ************ Global variables ************ */
@@ -562,52 +562,58 @@ double randdouble[MAX_PREPARED_RAND];
char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
"max(col0)", "min(col0)", "first(col0)", "last(col0)"};
+#define DEFAULT_DATATYPE_NUM 3
+
SArguments g_args = {
- NULL, // metaFile
- 0, // test_mode
- "127.0.0.1", // host
- 6030, // port
- TAOSC_IFACE, // iface
- "root", // user
- #ifdef _TD_POWER_
- "powerdb", // password
- #else
- "taosdata", // password
- #endif
- "test", // database
- 1, // replica
- "t", // tb_prefix
- NULL, // sqlFile
- true, // use_metric
- true, // drop_database
- true, // insert_only
- false, // debug_print
- false, // verbose_print
- false, // performance statistic print
- false, // answer_yes;
- "./output.txt", // output_file
- 0, // mode : sync or async
- {
- "INT", // datatype
- "INT", // datatype
- "INT", // datatype
- "INT", // datatype
- },
- 16, // len_of_binary
- 4, // num_of_CPR
- 10, // num_of_connections/thread
- 0, // insert_interval
- 1, // query_times
- 0, // interlace_rows;
- 30000, // num_of_RPR
- (1024*1024), // max_sql_len
- 10000, // num_of_tables
- 10000, // num_of_DPT
- 0, // abort
- 0, // disorderRatio
- 1000, // disorderRange
- 1, // method_of_delete
- NULL // arg_list
+ NULL, // metaFile
+ 0, // test_mode
+ "127.0.0.1", // host
+ 6030, // port
+ TAOSC_IFACE, // iface
+ "root", // user
+#ifdef _TD_POWER_
+ "powerdb", // password
+#elif (_TD_TQ_ == true)
+ "tqueue", // password
+#else
+ "taosdata", // password
+#endif
+ "test", // database
+ 1, // replica
+ "d", // tb_prefix
+ NULL, // sqlFile
+ true, // use_metric
+ true, // drop_database
+ true, // insert_only
+ false, // debug_print
+ false, // verbose_print
+ false, // performance statistic print
+ false, // answer_yes;
+ "./output.txt", // output_file
+ 0, // mode : sync or async
+ {
+ "FLOAT", // datatype
+ "INT", // datatype
+ "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3
+ },
+ 16, // len_of_binary
+ 4, // num_of_CPR
+ 10, // num_of_connections/thread
+ 0, // insert_interval
+ 1, // query_times
+ 0, // interlace_rows;
+ 30000, // num_of_RPR
+ (1024*1024), // max_sql_len
+ 10000, // num_of_tables
+ 10000, // num_of_DPT
+ 0, // abort
+ 0, // disorderRatio
+ 1000, // disorderRange
+ 1, // method_of_delete
+ NULL, // arg_list
+ 0, // totalInsertRows;
+ 0, // totalAffectedRows;
+ true, // demo_mode;
};
@@ -636,6 +642,9 @@ static FILE * g_fpOfInsertResult = NULL;
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
+// min() is used to keep strncpy within the destination buffer
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
///////////////////////////////////////////////////
@@ -668,639 +677,680 @@ static void printVersion() {
}
static void printHelp() {
- char indent[10] = " ";
- printf("%s%s%s%s\n", indent, "-f", indent,
- "The meta file to the execution procedure. Default is './meta.json'.");
- printf("%s%s%s%s\n", indent, "-u", indent,
- "The TDengine user name to use when connecting to the server. Default is 'root'.");
+ char indent[10] = " ";
+ printf("%s%s%s%s\n", indent, "-f", indent,
+ "The meta file to the execution procedure. Default is './meta.json'.");
+ printf("%s%s%s%s\n", indent, "-u", indent,
+ "The TDengine user name to use when connecting to the server. Default is 'root'.");
#ifdef _TD_POWER_
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The password to use when connecting to the server. Default is 'powerdb'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/power/'.");
+ printf("%s%s%s%s\n", indent, "-P", indent,
+ "The password to use when connecting to the server. Default is 'powerdb'.");
+ printf("%s%s%s%s\n", indent, "-c", indent,
+ "Configuration directory. Default is '/etc/power/'.");
+#elif (_TD_TQ_ == true)
+ printf("%s%s%s%s\n", indent, "-P", indent,
+ "The password to use when connecting to the server. Default is 'tqueue'.");
+ printf("%s%s%s%s\n", indent, "-c", indent,
+ "Configuration directory. Default is '/etc/tq/'.");
#else
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The password to use when connecting to the server. Default is 'taosdata'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/taos/'.");
+ printf("%s%s%s%s\n", indent, "-P", indent,
+ "The password to use when connecting to the server. Default is 'taosdata'.");
+ printf("%s%s%s%s\n", indent, "-c", indent,
+ "Configuration directory. Default is '/etc/taos/'.");
#endif
- printf("%s%s%s%s\n", indent, "-h", indent,
- "The host to connect to TDengine. Default is localhost.");
- printf("%s%s%s%s\n", indent, "-p", indent,
- "The TCP/IP port number to use for the connection. Default is 0.");
- printf("%s%s%s%s\n", indent, "-I", indent,
+ printf("%s%s%s%s\n", indent, "-h", indent,
+ "The host to connect to TDengine. Default is localhost.");
+ printf("%s%s%s%s\n", indent, "-p", indent,
+ "The TCP/IP port number to use for the connection. Default is 0.");
+ printf("%s%s%s%s\n", indent, "-I", indent,
#if STMT_IFACE_ENABLED == 1
- "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
+ "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
#else
- "The interface (taosc, rest) taosdemo uses. Default is 'taosc'.");
+ "The interface (taosc, rest) taosdemo uses. Default is 'taosc'.");
#endif
- printf("%s%s%s%s\n", indent, "-d", indent,
- "Destination database. Default is 'test'.");
- printf("%s%s%s%s\n", indent, "-a", indent,
- "Set the replica parameters of the database, Default 1, min: 1, max: 3.");
- printf("%s%s%s%s\n", indent, "-m", indent,
- "Table prefix name. Default is 't'.");
- printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file.");
- printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag.");
- printf("%s%s%s%s\n", indent, "-o", indent,
- "Direct output to the named file. Default is './output.txt'.");
- printf("%s%s%s%s\n", indent, "-q", indent,
- "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
- printf("%s%s%s%s\n", indent, "-b", indent,
- "The data_type of columns, default: INT,INT,INT,INT.");
- printf("%s%s%s%s\n", indent, "-w", indent,
- "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
- printf("%s%s%s%s%d\n", indent, "-l", indent,
- "The number of columns per record. Default is 4. Max values is ",
- MAX_NUM_DATATYPE);
- printf("%s%s%s%s\n", indent, "-T", indent,
- "The number of threads. Default is 10.");
- printf("%s%s%s%s\n", indent, "-i", indent,
- "The sleep time (ms) between insertion. Default is 0.");
- printf("%s%s%s%s\n", indent, "-r", indent,
- "The number of records per request. Default is 30000.");
- printf("%s%s%s%s\n", indent, "-t", indent,
- "The number of tables. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-n", indent,
- "The number of records per table. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag.");
- printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt.");
- printf("%s%s%s%s\n", indent, "-O", indent,
- "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
- printf("%s%s%s%s\n", indent, "-R", indent,
- "Out of order data's range, ms, default is 1000.");
- printf("%s%s%s%s\n", indent, "-g", indent,
- "Print debug info.");
- printf("%s%s%s\n", indent, "-V, --version\t",
- "Print version info.");
- printf("%s%s%s%s\n", indent, "--help\t", indent,
- "Print command line arguments list info.");
-/* printf("%s%s%s%s\n", indent, "-D", indent,
- "if elete database if exists. 0: no, 1: yes, default is 1");
+ printf("%s%s%s%s\n", indent, "-d", indent,
+ "Destination database. Default is 'test'.");
+ printf("%s%s%s%s\n", indent, "-a", indent,
+ "Set the replica parameters of the database, Default 1, min: 1, max: 3.");
+ printf("%s%s%s%s\n", indent, "-m", indent,
+ "Table prefix name. Default is 'd'.");
+ printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file.");
+ printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag.");
+ printf("%s%s%s%s\n", indent, "-o", indent,
+ "Direct output to the named file. Default is './output.txt'.");
+ printf("%s%s%s%s\n", indent, "-q", indent,
+ "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
+ printf("%s%s%s%s\n", indent, "-b", indent,
+ "The data_type of columns, default: FLOAT, INT, FLOAT.");
+ printf("%s%s%s%s\n", indent, "-w", indent,
+ "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
+ printf("%s%s%s%s%d%s%d\n", indent, "-l", indent,
+ "The number of columns per record. Default is ",
+ DEFAULT_DATATYPE_NUM,
+ ". Max values is ",
+ MAX_NUM_COLUMNS);
+ printf("%s%s%s%s\n", indent, indent, indent,
+ "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
+ printf("%s%s%s%s\n", indent, "-T", indent,
+ "The number of threads. Default is 10.");
+ printf("%s%s%s%s\n", indent, "-i", indent,
+ "The sleep time (ms) between insertion. Default is 0.");
+ printf("%s%s%s%s\n", indent, "-r", indent,
+ "The number of records per request. Default is 30000.");
+ printf("%s%s%s%s\n", indent, "-t", indent,
+ "The number of tables. Default is 10000.");
+ printf("%s%s%s%s\n", indent, "-n", indent,
+ "The number of records per table. Default is 10000.");
+ printf("%s%s%s%s\n", indent, "-M", indent,
+ "The value of records generated are totally random.");
+ printf("%s%s%s%s\n", indent, indent, indent,
+ " The default is to simulate power equipment senario.");
+ printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag.");
+ printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt.");
+ printf("%s%s%s%s\n", indent, "-O", indent,
+ "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
+ printf("%s%s%s%s\n", indent, "-R", indent,
+ "Out of order data's range, ms, default is 1000.");
+ printf("%s%s%s%s\n", indent, "-g", indent,
+ "Print debug info.");
+ printf("%s%s%s\n", indent, "-V, --version\t",
+ "Print version info.");
+ printf("%s%s%s%s\n", indent, "--help\t", indent,
+ "Print command line arguments list info.");
+ /* printf("%s%s%s%s\n", indent, "-D", indent,
+ "Delete database if exists. 0: no, 1: yes, default is 1");
*/
}
static bool isStringNumber(char *input)
{
- int len = strlen(input);
- if (0 == len) {
- return false;
- }
+ int len = strlen(input);
+ if (0 == len) {
+ return false;
+ }
- for (int i = 0; i < len; i++) {
- if (!isdigit(input[i]))
- return false;
- }
+ for (int i = 0; i < len; i++) {
+ if (!isdigit(input[i]))
+ return false;
+ }
- return true;
+ return true;
}
static void parse_args(int argc, char *argv[], SArguments *arguments) {
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-f") == 0) {
- arguments->metaFile = argv[++i];
- } else if (strcmp(argv[i], "-c") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-c need a valid path following!\n");
- exit(EXIT_FAILURE);
- }
- tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
- } else if (strcmp(argv[i], "-h") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-h need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->host = argv[++i];
- } else if (strcmp(argv[i], "-p") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-p need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->port = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-I") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-I need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- ++i;
- if (0 == strcasecmp(argv[i], "taosc")) {
- arguments->iface = TAOSC_IFACE;
- } else if (0 == strcasecmp(argv[i], "rest")) {
- arguments->iface = REST_IFACE;
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-f") == 0) {
+ arguments->demo_mode = false;
+ arguments->metaFile = argv[++i];
+ } else if (strcmp(argv[i], "-c") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-c need a valid path following!\n");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (strcmp(argv[i], "-h") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-h need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (strcmp(argv[i], "-p") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-p need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-I") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-I need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ ++i;
+ if (0 == strcasecmp(argv[i], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i], "rest")) {
+ arguments->iface = REST_IFACE;
#if STMT_IFACE_ENABLED == 1
- } else if (0 == strcasecmp(argv[i], "stmt")) {
- arguments->iface = STMT_IFACE;
+ } else if (0 == strcasecmp(argv[i], "stmt")) {
+ arguments->iface = STMT_IFACE;
#endif
- } else {
- errorPrint("%s", "\n\t-I need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- } else if (strcmp(argv[i], "-u") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-u need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->user = argv[++i];
- } else if (strcmp(argv[i], "-P") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-P need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->password = argv[++i];
- } else if (strcmp(argv[i], "-o") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-o need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->output_file = argv[++i];
- } else if (strcmp(argv[i], "-s") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-s need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->sqlFile = argv[++i];
- } else if (strcmp(argv[i], "-q") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n");
- exit(EXIT_FAILURE);
- }
- arguments->async_mode = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-T") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-T need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->num_of_threads = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-i") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-i need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->insert_interval = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-qt") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-qt need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->query_times = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-B") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-B need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->interlace_rows = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-r") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-r need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->num_of_RPR = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-t") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-t need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->num_of_tables = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-n") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-n need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->num_of_DPT = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-d") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-d need a valid string following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->database = argv[++i];
- } else if (strcmp(argv[i], "-l") == 0) {
- if (argc == i+1) {
- if (!isStringNumber(argv[i+1])) {
- printHelp();
- errorPrint("%s", "\n\t-l need a number following!\n");
- exit(EXIT_FAILURE);
- }
- }
- arguments->num_of_CPR = atoi(argv[++i]);
+ } else {
+ errorPrint("%s", "\n\t-I need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-u") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-u need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else if (strcmp(argv[i], "-P") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-P need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->password = argv[++i];
+ } else if (strcmp(argv[i], "-o") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-o need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else if (strcmp(argv[i], "-s") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-s need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else if (strcmp(argv[i], "-q") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-T") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-T need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->num_of_threads = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-i") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-i need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-qt") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-qt need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->query_times = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-B") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-B need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlace_rows = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-r") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-r need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->num_of_RPR = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-t") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-t need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->num_of_tables = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-n") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-n need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->num_of_DPT = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-d") == 0) {
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-d need a valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else if (strcmp(argv[i], "-l") == 0) {
+ arguments->demo_mode = false;
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-l need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->num_of_CPR = atoi(argv[++i]);
- if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
- printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
- prompt();
- arguments->num_of_CPR = MAX_NUM_DATATYPE;
- }
+ if (arguments->num_of_CPR > MAX_NUM_COLUMNS) {
+ printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS);
+ prompt();
+ arguments->num_of_CPR = MAX_NUM_COLUMNS;
+ }
- for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
- arguments->datatype[col] = NULL;
- }
+ for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) {
+ arguments->datatype[col] = "INT";
+ }
+ for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
+ arguments->datatype[col] = NULL;
+ }
+ } else if (strcmp(argv[i], "-b") == 0) {
+ arguments->demo_mode = false;
+ if (argc == i+1) {
+ printHelp();
+ errorPrint("%s", "\n\t-b need valid string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ ++i;
+ if (strstr(argv[i], ",") == NULL) {
+ // only one col
+ if (strcasecmp(argv[i], "INT")
+ && strcasecmp(argv[i], "FLOAT")
+ && strcasecmp(argv[i], "TINYINT")
+ && strcasecmp(argv[i], "BOOL")
+ && strcasecmp(argv[i], "SMALLINT")
+ && strcasecmp(argv[i], "BIGINT")
+ && strcasecmp(argv[i], "DOUBLE")
+ && strcasecmp(argv[i], "BINARY")
+ && strcasecmp(argv[i], "TIMESTAMP")
+ && strcasecmp(argv[i], "NCHAR")) {
+ printHelp();
+ errorPrint("%s", "-b: Invalid data_type!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->datatype[0] = argv[i];
+ } else {
+ // more than one col
+ int index = 0;
+ g_dupstr = strdup(argv[i]);
+ char *running = g_dupstr;
+ char *token = strsep(&running, ",");
+ while(token != NULL) {
+ if (strcasecmp(token, "INT")
+ && strcasecmp(token, "FLOAT")
+ && strcasecmp(token, "TINYINT")
+ && strcasecmp(token, "BOOL")
+ && strcasecmp(token, "SMALLINT")
+ && strcasecmp(token, "BIGINT")
+ && strcasecmp(token, "DOUBLE")
+ && strcasecmp(token, "BINARY")
+ && strcasecmp(token, "TIMESTAMP")
+ && strcasecmp(token, "NCHAR")) {
+ printHelp();
+ free(g_dupstr);
+ errorPrint("%s", "-b: Invalid data_type!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->datatype[index++] = token;
+ token = strsep(&running, ",");
+ if (index >= MAX_NUM_COLUMNS) break;
+ }
+ arguments->datatype[index] = NULL;
+ }
+ } else if (strcmp(argv[i], "-w") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-w need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->len_of_binary = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-m") == 0) {
+ if ((argc == i+1) ||
+ (isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else if (strcmp(argv[i], "-N") == 0) {
+ arguments->use_metric = false;
+ } else if (strcmp(argv[i], "-M") == 0) {
+ arguments->demo_mode = false;
+ } else if (strcmp(argv[i], "-x") == 0) {
+ arguments->insert_only = false;
+ } else if (strcmp(argv[i], "-y") == 0) {
+ arguments->answer_yes = true;
+ } else if (strcmp(argv[i], "-g") == 0) {
+ arguments->debug_print = true;
+ } else if (strcmp(argv[i], "-gg") == 0) {
+ arguments->verbose_print = true;
+ } else if (strcmp(argv[i], "-pp") == 0) {
+ arguments->performance_print = true;
+ } else if (strcmp(argv[i], "-O") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-O need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
- } else if (strcmp(argv[i], "-b") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-b need valid string following!\n");
- exit(EXIT_FAILURE);
- }
- ++i;
- if (strstr(argv[i], ",") == NULL) {
- // only one col
- if (strcasecmp(argv[i], "INT")
- && strcasecmp(argv[i], "FLOAT")
- && strcasecmp(argv[i], "TINYINT")
- && strcasecmp(argv[i], "BOOL")
- && strcasecmp(argv[i], "SMALLINT")
- && strcasecmp(argv[i], "BIGINT")
- && strcasecmp(argv[i], "DOUBLE")
- && strcasecmp(argv[i], "BINARY")
- && strcasecmp(argv[i], "TIMESTAMP")
- && strcasecmp(argv[i], "NCHAR")) {
- printHelp();
- errorPrint("%s", "-b: Invalid data_type!\n");
- exit(EXIT_FAILURE);
- }
- arguments->datatype[0] = argv[i];
- } else {
- // more than one col
- int index = 0;
- g_dupstr = strdup(argv[i]);
- char *running = g_dupstr;
- char *token = strsep(&running, ",");
- while(token != NULL) {
- if (strcasecmp(token, "INT")
- && strcasecmp(token, "FLOAT")
- && strcasecmp(token, "TINYINT")
- && strcasecmp(token, "BOOL")
- && strcasecmp(token, "SMALLINT")
- && strcasecmp(token, "BIGINT")
- && strcasecmp(token, "DOUBLE")
- && strcasecmp(token, "BINARY")
- && strcasecmp(token, "TIMESTAMP")
- && strcasecmp(token, "NCHAR")) {
- printHelp();
- free(g_dupstr);
- errorPrint("%s", "-b: Invalid data_type!\n");
- exit(EXIT_FAILURE);
- }
- arguments->datatype[index++] = token;
- token = strsep(&running, ",");
- if (index >= MAX_NUM_DATATYPE) break;
- }
- arguments->datatype[index] = NULL;
- }
- } else if (strcmp(argv[i], "-w") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-w need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->len_of_binary = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-m") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-m need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->tb_prefix = argv[++i];
- } else if (strcmp(argv[i], "-N") == 0) {
- arguments->use_metric = false;
- } else if (strcmp(argv[i], "-x") == 0) {
- arguments->insert_only = false;
- } else if (strcmp(argv[i], "-y") == 0) {
- arguments->answer_yes = true;
- } else if (strcmp(argv[i], "-g") == 0) {
- arguments->debug_print = true;
- } else if (strcmp(argv[i], "-gg") == 0) {
- arguments->verbose_print = true;
- } else if (strcmp(argv[i], "-pp") == 0) {
- arguments->performance_print = true;
- } else if (strcmp(argv[i], "-O") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-O need a number following!\n");
- exit(EXIT_FAILURE);
- }
+ arguments->disorderRatio = atoi(argv[++i]);
- arguments->disorderRatio = atoi(argv[++i]);
+ if (arguments->disorderRatio > 50) {
+ arguments->disorderRatio = 50;
+ }
- if (arguments->disorderRatio > 50) {
- arguments->disorderRatio = 50;
- }
+ if (arguments->disorderRatio < 0) {
+ arguments->disorderRatio = 0;
+ }
- if (arguments->disorderRatio < 0) {
- arguments->disorderRatio = 0;
- }
+ } else if (strcmp(argv[i], "-R") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-R need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
- } else if (strcmp(argv[i], "-R") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-R need a number following!\n");
- exit(EXIT_FAILURE);
- }
+ arguments->disorderRange = atoi(argv[++i]);
+ if (arguments->disorderRange < 0)
+ arguments->disorderRange = 1000;
- arguments->disorderRange = atoi(argv[++i]);
- if (arguments->disorderRange < 0)
- arguments->disorderRange = 1000;
+ } else if (strcmp(argv[i], "-a") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-a need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ if (arguments->replica > 3 || arguments->replica < 1) {
+ arguments->replica = 1;
+ }
+ } else if (strcmp(argv[i], "-D") == 0) {
+ arguments->method_of_delete = atoi(argv[++i]);
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ } else if ((strcmp(argv[i], "--version") == 0) ||
+ (strcmp(argv[i], "-V") == 0)){
+ printVersion();
+ exit(0);
+ } else if (strcmp(argv[i], "--help") == 0) {
+ printHelp();
+ exit(0);
+ } else {
+ printHelp();
+ errorPrint("%s", "ERROR: wrong options\n");
+ exit(EXIT_FAILURE);
+ }
+ }
- } else if (strcmp(argv[i], "-a") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-a need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->replica = atoi(argv[++i]);
- if (arguments->replica > 3 || arguments->replica < 1) {
- arguments->replica = 1;
- }
- } else if (strcmp(argv[i], "-D") == 0) {
- arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete > 3) {
- errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
- exit(EXIT_FAILURE);
- }
- } else if ((strcmp(argv[i], "--version") == 0) ||
- (strcmp(argv[i], "-V") == 0)){
- printVersion();
- exit(0);
- } else if (strcmp(argv[i], "--help") == 0) {
- printHelp();
- exit(0);
- } else {
- printHelp();
- errorPrint("%s", "ERROR: wrong options\n");
- exit(EXIT_FAILURE);
+ int columnCount;
+ for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
+ if (g_args.datatype[columnCount] == NULL) {
+ break;
+ }
}
- }
- int columnCount;
- for (columnCount = 0; columnCount < MAX_NUM_DATATYPE; columnCount ++) {
- if (g_args.datatype[columnCount] == NULL) {
- break;
+ if (0 == columnCount) {
+ perror("data type error!");
+ exit(-1);
}
- }
+ g_args.num_of_CPR = columnCount;
+
+ if (((arguments->debug_print) && (arguments->metaFile == NULL))
+ || arguments->verbose_print) {
+ printf("###################################################################\n");
+ printf("# meta file: %s\n", arguments->metaFile);
+ printf("# Server IP: %s:%hu\n",
+ arguments->host == NULL ? "localhost" : arguments->host,
+ arguments->port );
+ printf("# User: %s\n", arguments->user);
+ printf("# Password: %s\n", arguments->password);
+ printf("# Use metric: %s\n",
+ arguments->use_metric ? "true" : "false");
+ if (*(arguments->datatype)) {
+ printf("# Specified data type: ");
+ for (int i = 0; i < MAX_NUM_COLUMNS; i++)
+ if (arguments->datatype[i])
+ printf("%s,", arguments->datatype[i]);
+ else
+ break;
+ printf("\n");
+ }
+ printf("# Insertion interval: %"PRIu64"\n",
+ arguments->insert_interval);
+ printf("# Number of records per req: %u\n",
+ arguments->num_of_RPR);
+ printf("# Max SQL length: %"PRIu64"\n",
+ arguments->max_sql_len);
+ printf("# Length of Binary: %d\n", arguments->len_of_binary);
+ printf("# Number of Threads: %d\n", arguments->num_of_threads);
+ printf("# Number of Tables: %"PRId64"\n",
+ arguments->num_of_tables);
+ printf("# Number of Data per Table: %"PRId64"\n",
+ arguments->num_of_DPT);
+ printf("# Database name: %s\n", arguments->database);
+ printf("# Table prefix: %s\n", arguments->tb_prefix);
+ if (arguments->disorderRatio) {
+ printf("# Data order: %d\n", arguments->disorderRatio);
+ printf("# Data out of order rate: %d\n", arguments->disorderRange);
- if (0 == columnCount) {
- perror("data type error!");
- exit(-1);
- }
- g_args.num_of_CPR = columnCount;
-
- if (((arguments->debug_print) && (arguments->metaFile == NULL))
- || arguments->verbose_print) {
- printf("###################################################################\n");
- printf("# meta file: %s\n", arguments->metaFile);
- printf("# Server IP: %s:%hu\n",
- arguments->host == NULL ? "localhost" : arguments->host,
- arguments->port );
- printf("# User: %s\n", arguments->user);
- printf("# Password: %s\n", arguments->password);
- printf("# Use metric: %s\n",
- arguments->use_metric ? "true" : "false");
- if (*(arguments->datatype)) {
- printf("# Specified data type: ");
- for (int i = 0; i < MAX_NUM_DATATYPE; i++)
- if (arguments->datatype[i])
- printf("%s,", arguments->datatype[i]);
- else
- break;
- printf("\n");
+ }
+ printf("# Delete method: %d\n", arguments->method_of_delete);
+ printf("# Answer yes when prompt: %d\n", arguments->answer_yes);
+ printf("# Print debug info: %d\n", arguments->debug_print);
+ printf("# Print verbose info: %d\n", arguments->verbose_print);
+ printf("###################################################################\n");
+
+ prompt();
}
- printf("# Insertion interval: %"PRIu64"\n",
- arguments->insert_interval);
- printf("# Number of records per req: %u\n",
- arguments->num_of_RPR);
- printf("# Max SQL length: %"PRIu64"\n",
- arguments->max_sql_len);
- printf("# Length of Binary: %d\n", arguments->len_of_binary);
- printf("# Number of Threads: %d\n", arguments->num_of_threads);
- printf("# Number of Tables: %"PRId64"\n",
- arguments->num_of_tables);
- printf("# Number of Data per Table: %"PRId64"\n",
- arguments->num_of_DPT);
- printf("# Database name: %s\n", arguments->database);
- printf("# Table prefix: %s\n", arguments->tb_prefix);
- if (arguments->disorderRatio) {
- printf("# Data order: %d\n", arguments->disorderRatio);
- printf("# Data out of order rate: %d\n", arguments->disorderRange);
-
- }
- printf("# Delete method: %d\n", arguments->method_of_delete);
- printf("# Answer yes when prompt: %d\n", arguments->answer_yes);
- printf("# Print debug info: %d\n", arguments->debug_print);
- printf("# Print verbose info: %d\n", arguments->verbose_print);
- printf("###################################################################\n");
-
- prompt();
- }
}
-static bool getInfoFromJsonFile(char* file);
-static void init_rand_data();
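+/* NULL-tolerant wrappers: tmfclose()/tmfree() only call fclose()/free() on non-NULL handles. */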
static void tmfclose(FILE *fp) {
- if (NULL != fp) {
- fclose(fp);
- }
+ if (NULL != fp) {
+ fclose(fp);
+ }
}
static void tmfree(char *buf) {
- if (NULL != buf) {
- free(buf);
- }
+ if (NULL != buf) {
+ free(buf);
+ }
}
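+/*
+ * queryDbExec() retries a statement up to 5 times.  On success it returns
+ * taos_affected_rows() for INSERT_TYPE statements and 0 otherwise; on
+ * persistent failure it frees the result set and returns -1.
+ */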
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
+ int i;
+ TAOS_RES *res = NULL;
+ int32_t code = -1;
+
+ for (i = 0; i < 5 /* retry */; i++) {
+ if (NULL != res) {
+ taos_free_result(res);
+ res = NULL;
+ }
- for (i = 0; i < 5 /* retry */; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
+ res = taos_query(taos, command);
+ code = taos_errno(res);
+ if (0 == code) {
+ break;
+ }
}
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
+ verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
+ if (code != 0) {
+ if (!quiet) {
+ errorPrint("Failed to execute %s, reason: %s\n",
+ command, taos_errstr(res));
+ }
+ taos_free_result(res);
+ //taos_close(taos);
+ return -1;
}
- }
- verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
- if (code != 0) {
- if (!quiet) {
- errorPrint("Failed to execute %s, reason: %s\n",
- command, taos_errstr(res));
+ if (INSERT_TYPE == type) {
+ int affectedRows = taos_affected_rows(res);
+ taos_free_result(res);
+ return affectedRows;
}
- taos_free_result(res);
- //taos_close(taos);
- return -1;
- }
- if (INSERT_TYPE == type) {
- int affectedRows = taos_affected_rows(res);
taos_free_result(res);
- return affectedRows;
- }
-
- taos_free_result(res);
- return 0;
+ return 0;
}
static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{
- pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
- if (pThreadInfo->fp == NULL) {
- errorPrint(
- "%s() LN%d, failed to open result file: %s, result will not save to file\n",
- __func__, __LINE__, pThreadInfo->filePath);
- return;
- }
+ pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
+ if (pThreadInfo->fp == NULL) {
+ errorPrint(
+ "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+ __func__, __LINE__, pThreadInfo->filePath);
+ return;
+ }
- fprintf(pThreadInfo->fp, "%s", resultBuf);
- tmfclose(pThreadInfo->fp);
- pThreadInfo->fp = NULL;
+ fprintf(pThreadInfo->fp, "%s", resultBuf);
+ tmfclose(pThreadInfo->fp);
+ pThreadInfo->fp = NULL;
}
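+/*
+ * fetchResult() prints rows into a 100 MB buffer; whenever the buffer gets
+ * within 32000 bytes of capacity it is flushed to the per-thread result file
+ * (if one is configured) and reset, and any remainder is flushed at the end.
+ */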
static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
- TAOS_ROW row = NULL;
- int num_rows = 0;
- int num_fields = taos_field_count(res);
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- char* databuf = (char*) calloc(1, 100*1024*1024);
- if (databuf == NULL) {
- errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
- __func__, __LINE__);
- return ;
- }
+ TAOS_ROW row = NULL;
+ int num_rows = 0;
+ int num_fields = taos_field_count(res);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ char* databuf = (char*) calloc(1, 100*1024*1024);
+ if (databuf == NULL) {
+ errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
+ __func__, __LINE__);
+ return ;
+ }
- int totalLen = 0;
+ int64_t totalLen = 0;
- // fetch the records row by row
- while((row = taos_fetch_row(res))) {
- if ((strlen(pThreadInfo->filePath) > 0)
- && (totalLen >= 100*1024*1024 - 32000)) {
+ // fetch the records row by row
+ while((row = taos_fetch_row(res))) {
+ if (totalLen >= 100*1024*1024 - 32000) {
+ if (strlen(pThreadInfo->filePath) > 0)
+ appendResultBufToFile(databuf, pThreadInfo);
+ totalLen = 0;
+ memset(databuf, 0, 100*1024*1024);
+ }
+ num_rows++;
+ char temp[16000] = {0};
+ int len = taos_print_row(temp, row, fields, num_fields);
+ len += sprintf(temp + len, "\n");
+ //printf("query result:%s\n", temp);
+ memcpy(databuf + totalLen, temp, len);
+ totalLen += len;
+ verbosePrint("%s() LN%d, totalLen: %"PRId64"\n",
+ __func__, __LINE__, totalLen);
+ }
+
+ verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
+ __func__, __LINE__, databuf, pThreadInfo->filePath);
+ if (strlen(pThreadInfo->filePath) > 0) {
appendResultBufToFile(databuf, pThreadInfo);
- totalLen = 0;
- memset(databuf, 0, 100*1024*1024);
}
- num_rows++;
- char temp[16000] = {0};
- int len = taos_print_row(temp, row, fields, num_fields);
- len += sprintf(temp + len, "\n");
- //printf("query result:%s\n", temp);
- memcpy(databuf + totalLen, temp, len);
- totalLen += len;
- }
-
- verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
- __func__, __LINE__, databuf, pThreadInfo->filePath);
- if (strlen(pThreadInfo->filePath) > 0) {
- appendResultBufToFile(databuf, pThreadInfo);
- }
- free(databuf);
+ free(databuf);
}
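+/*
+ * selectAndGetResult() dispatches on g_queryInfo.queryMode: "taosc" runs the
+ * command through taos_query() and saves rows via fetchResult(); "rest" sends
+ * it over HTTP with postProceSql(); any other mode is reported as an error.
+ */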
static void selectAndGetResult(
threadInfo *pThreadInfo, char *command)
{
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
- TAOS_RES *res = taos_query(pThreadInfo->taos, command);
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return;
- }
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
- fetchResult(res, pThreadInfo);
- taos_free_result(res);
+ fetchResult(res, pThreadInfo);
+ taos_free_result(res);
- } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
- int retCode = postProceSql(
- g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
- command,
- pThreadInfo);
- if (0 != retCode) {
- printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
- }
+ } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ int retCode = postProceSql(
+ g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
+ command,
+ pThreadInfo);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ }
- } else {
- errorPrint("%s() LN%d, unknown query mode: %s\n",
- __func__, __LINE__, g_queryInfo.queryMode);
- }
+ } else {
+ errorPrint("%s() LN%d, unknown query mode: %s\n",
+ __func__, __LINE__, g_queryInfo.queryMode);
+ }
}
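+/*
+ * The rand_* generators below do not call the RNG per value; they walk the
+ * randint/randbigint/randfloat/randdouble arrays (MAX_PREPARED_RAND entries,
+ * filled once by init_rand_data()) with a per-function static cursor.
+ */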
static int32_t rand_bool(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 2;
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 2;
}
static int32_t rand_tinyint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 128;
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 128;
}
static int32_t rand_smallint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 32767;
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor] % 32767;
}
static int32_t rand_int(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor];
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randint[cursor];
}
static int64_t rand_bigint(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randbigint[cursor];
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randbigint[cursor];
}
static float rand_float(){
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randfloat[cursor];
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randfloat[cursor];
+}
+
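+/*
+ * The demo_* helpers below appear to back the default "power equipment"
+ * scenario: demo_current_float() yields roughly 9.8 to 10.2 (A),
+ * demo_voltage_int() yields 215 to 224 (V), and demo_phase_float() yields
+ * roughly 0.32 to 0.35 (a value in the 115 to 124 range divided by 360).
+ */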
+static float demo_current_float(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return (float)(9.8 + 0.04 * (randint[cursor] % 10) + randfloat[cursor]/1000000000);
+}
+
+static int32_t demo_voltage_int(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return 215 + randint[cursor] % 10;
+}
+
+static float demo_phase_float(){
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return (float)((115 + randint[cursor] % 10 + randfloat[cursor]/1000000000)/360);
}
#if 0
@@ -1323,33 +1373,32 @@ static void nonrand_string(char *str, int size)
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
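+/*
+ * rand_string() writes `size` random characters drawn from charset[] and then
+ * a terminating NUL, so the destination buffer must hold at least size + 1
+ * bytes.
+ */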
static void rand_string(char *str, int size) {
- str[0] = 0;
- if (size > 0) {
- //--size;
- int n;
- for (n = 0; n < size; n++) {
- int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
- str[n] = charset[key];
+ str[0] = 0;
+ if (size > 0) {
+ //--size;
+ int n;
+ for (n = 0; n < size; n++) {
+ int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
+ str[n] = charset[key];
+ }
+ str[n] = 0;
}
- str[n] = 0;
- }
}
static double rand_double() {
- static int cursor;
- cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return randdouble[cursor];
-
+ static int cursor;
+ cursor++;
+ cursor = cursor % MAX_PREPARED_RAND;
+ return randdouble[cursor];
}
static void init_rand_data() {
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
- randint[i] = (int)(taosRandom() % 65535);
- randbigint[i] = (int64_t)(taosRandom() % 2147483648);
- randfloat[i] = (float)(taosRandom() / 1000.0);
- randdouble[i] = (double)(taosRandom() / 1000000.0);
- }
+ for (int i = 0; i < MAX_PREPARED_RAND; i++){
+ randint[i] = (int)(taosRandom() % 65535);
+ randbigint[i] = (int64_t)(taosRandom() % 2147483648);
+ randfloat[i] = (float)(taosRandom() / 1000.0);
+ randdouble[i] = (double)(taosRandom() / 1000000.0);
+ }
}
#define SHOW_PARSE_RESULT_START() \
@@ -1375,759 +1424,767 @@ static void init_rand_data() {
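+/*
+ * printfInsertMeta() echoes the parsed insert configuration (per database and
+ * per super table) to the console; it returns -1 if a database precision is
+ * neither "ms" nor "us", and 0 otherwise.
+ */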
static int printfInsertMeta() {
SHOW_PARSE_RESULT_START();
- printf("interface: \033[33m%s\033[0m\n",
- (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
- printf("host: \033[33m%s:%u\033[0m\n",
- g_Dbs.host, g_Dbs.port);
- printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
- printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
- printf("configDir: \033[33m%s\033[0m\n", configDir);
- printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
- printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
- printf("thread num of create table: \033[33m%d\033[0m\n",
- g_Dbs.threadCountByCreateTbl);
- printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
- g_args.insert_interval);
- printf("number of records per req: \033[33m%u\033[0m\n",
- g_args.num_of_RPR);
- printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
- g_args.max_sql_len);
-
- printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
-
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- printf("database[\033[33m%d\033[0m]:\n", i);
- printf(" database[%d] name: \033[33m%s\033[0m\n",
- i, g_Dbs.db[i].dbName);
- if (0 == g_Dbs.db[i].drop) {
- printf(" drop: \033[33mno\033[0m\n");
- } else {
- printf(" drop: \033[33myes\033[0m\n");
- }
-
- if (g_Dbs.db[i].dbCfg.blocks > 0) {
- printf(" blocks: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.blocks);
- }
- if (g_Dbs.db[i].dbCfg.cache > 0) {
- printf(" cache: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.cache);
- }
- if (g_Dbs.db[i].dbCfg.days > 0) {
- printf(" days: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.days);
- }
- if (g_Dbs.db[i].dbCfg.keep > 0) {
- printf(" keep: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.keep);
- }
- if (g_Dbs.db[i].dbCfg.replica > 0) {
- printf(" replica: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.replica);
- }
- if (g_Dbs.db[i].dbCfg.update > 0) {
- printf(" update: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.update);
- }
- if (g_Dbs.db[i].dbCfg.minRows > 0) {
- printf(" minRows: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.minRows);
- }
- if (g_Dbs.db[i].dbCfg.maxRows > 0) {
- printf(" maxRows: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.maxRows);
- }
- if (g_Dbs.db[i].dbCfg.comp > 0) {
- printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
- }
- if (g_Dbs.db[i].dbCfg.walLevel > 0) {
- printf(" walLevel: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.walLevel);
- }
- if (g_Dbs.db[i].dbCfg.fsync > 0) {
- printf(" fsync: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.fsync);
- }
- if (g_Dbs.db[i].dbCfg.quorum > 0) {
- printf(" quorum: \033[33m%d\033[0m\n",
- g_Dbs.db[i].dbCfg.quorum);
- }
- if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
- || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- printf(" precision: \033[33m%s\033[0m\n",
- g_Dbs.db[i].dbCfg.precision);
- } else {
- printf("\033[1m\033[40;31m precision error: %s\033[0m\n",
- g_Dbs.db[i].dbCfg.precision);
- return -1;
- }
- }
+ if (g_args.demo_mode)
+ printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n");
+ else
+ printf("\ntaosdemo is simulating random data as you request..\n\n");
+
+ printf("interface: \033[33m%s\033[0m\n",
+ (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
+ printf("host: \033[33m%s:%u\033[0m\n",
+ g_Dbs.host, g_Dbs.port);
+ printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
+ printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
+ printf("configDir: \033[33m%s\033[0m\n", configDir);
+ printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
+ printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
+ printf("thread num of create table: \033[33m%d\033[0m\n",
+ g_Dbs.threadCountByCreateTbl);
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_args.insert_interval);
+ printf("number of records per req: \033[33m%u\033[0m\n",
+ g_args.num_of_RPR);
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
+ g_args.max_sql_len);
+
+ printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
- printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
- g_Dbs.db[i].superTblCount);
- for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ printf("database[\033[33m%d\033[0m]:\n", i);
+ printf(" database[%d] name: \033[33m%s\033[0m\n",
+ i, g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ printf(" drop: \033[33mno\033[0m\n");
+ } else {
+ printf(" drop: \033[33myes\033[0m\n");
+ }
- printf(" stbName: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ printf(" blocks: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ printf(" cache: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ printf(" days: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ printf(" keep: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ printf(" replica: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ printf(" update: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ printf(" minRows: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ printf(" maxRows: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ printf(" walLevel: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ printf(" fsync: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ printf(" quorum: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ printf(" precision: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].dbCfg.precision);
+ } else {
+ printf("\033[1m\033[40;31m precision error: %s\033[0m\n",
+ g_Dbs.db[i].dbCfg.precision);
+ return -1;
+ }
+ }
- if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
- } else if (AUTO_CREATE_SUBTBL ==
- g_Dbs.db[i].superTbls[j].autoCreateTable) {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes");
- } else {
- printf(" autoCreateTable: \033[33m%s\033[0m\n", "error");
- }
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTblCount);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
- if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- printf(" childTblExists: \033[33m%s\033[0m\n", "no");
- } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- printf(" childTblExists: \033[33m%s\033[0m\n", "yes");
- } else {
- printf(" childTblExists: \033[33m%s\033[0m\n", "error");
- }
+ printf(" stbName: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sTblName);
- printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblCount);
- printf(" childTblPrefix: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblPrefix);
- printf(" dataSource: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].dataSource);
- printf(" iface: \033[33m%s\033[0m\n",
- (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
- (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
- if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
- printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblLimit);
- }
- if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
- printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].childTblOffset);
- }
- printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertRows);
-/*
- if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
- }else {
- printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
- }
- */
- printf(" interlaceRows: \033[33m%u\033[0m\n",
- g_Dbs.db[i].superTbls[j].interlaceRows);
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
+ } else if (AUTO_CREATE_SUBTBL ==
+ g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "error");
+ }
- if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].insertInterval);
- }
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "no");
+ } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "error");
+ }
- printf(" disorderRange: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].disorderRange);
- printf(" disorderRatio: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].maxSqlLen);
- printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
- g_Dbs.db[i].superTbls[j].timeStampStep);
- printf(" startTimestamp: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].startTimestamp);
- printf(" sampleFormat: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sampleFormat);
- printf(" sampleFile: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sampleFile);
- printf(" tagsFile: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].tagsFile);
- printf(" columnCount: \033[33m%d\033[0m\n",
- g_Dbs.db[i].superTbls[j].columnCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "binary", 6))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "nchar", 5))) {
- printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k,
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- } else {
- printf("column[%d]:\033[33m%s\033[0m ", k,
- g_Dbs.db[i].superTbls[j].columns[k].dataType);
- }
- }
- printf("\n");
+ printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblCount);
+ printf(" childTblPrefix: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblPrefix);
+ printf(" dataSource: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].dataSource);
+ printf(" iface: \033[33m%s\033[0m\n",
+ (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
+ if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
+ printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblLimit);
+ }
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblOffset);
+ }
+ printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+ /*
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
+ }else {
+ printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
+ }
+ */
+ printf(" interlaceRows: \033[33m%u\033[0m\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+
+ if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertInterval);
+ }
- printf(" tagCount: \033[33m%d\033[0m\n ",
- g_Dbs.db[i].superTbls[j].tagCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "nchar", strlen("nchar")))) {
- printf("tag[%d]:\033[33m%s(%d)\033[0m ", k,
- g_Dbs.db[i].superTbls[j].tags[k].dataType,
- g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- } else {
- printf("tag[%d]:\033[33m%s\033[0m ", k,
- g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ printf(" disorderRange: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTbls[j].disorderRange);
+ printf(" disorderRatio: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
+ printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
+ printf(" startTimestamp: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].startTimestamp);
+ printf(" sampleFormat: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sampleFormat);
+ printf(" sampleFile: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sampleFile);
+ printf(" tagsFile: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].tagsFile);
+ printf(" columnCount: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "binary", 6))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", 5))) {
+ printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ printf("column[%d]:\033[33m%s\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ printf("\n");
+
+ printf(" tagCount: \033[33m%d\033[0m\n ",
+ g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
+ printf("tag[%d]:\033[33m%s(%d)\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ printf("tag[%d]:\033[33m%s\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ printf("\n");
}
- }
- printf("\n");
+ printf("\n");
}
- printf("\n");
- }
- SHOW_PARSE_RESULT_END();
+ SHOW_PARSE_RESULT_END();
- return 0;
+ return 0;
}
static void printfInsertMetaToFile(FILE* fp) {
- SHOW_PARSE_RESULT_START_TO_FILE(fp);
+ SHOW_PARSE_RESULT_START_TO_FILE(fp);
- fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
- fprintf(fp, "user: %s\n", g_Dbs.user);
- fprintf(fp, "configDir: %s\n", configDir);
- fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
- fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
- fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR);
- fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
- fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
+ fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
+ fprintf(fp, "user: %s\n", g_Dbs.user);
+ fprintf(fp, "configDir: %s\n", configDir);
+ fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
+ fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
+ fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
+ fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
+ fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
- for (int i = 0; i < g_Dbs.dbCount; i++) {
- fprintf(fp, "database[%d]:\n", i);
- fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName);
- if (0 == g_Dbs.db[i].drop) {
- fprintf(fp, " drop: no\n");
- }else {
- fprintf(fp, " drop: yes\n");
- }
-
- if (g_Dbs.db[i].dbCfg.blocks > 0) {
- fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
- }
- if (g_Dbs.db[i].dbCfg.cache > 0) {
- fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache);
- }
- if (g_Dbs.db[i].dbCfg.days > 0) {
- fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days);
- }
- if (g_Dbs.db[i].dbCfg.keep > 0) {
- fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep);
- }
- if (g_Dbs.db[i].dbCfg.replica > 0) {
- fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica);
- }
- if (g_Dbs.db[i].dbCfg.update > 0) {
- fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update);
- }
- if (g_Dbs.db[i].dbCfg.minRows > 0) {
- fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
- }
- if (g_Dbs.db[i].dbCfg.maxRows > 0) {
- fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
- }
- if (g_Dbs.db[i].dbCfg.comp > 0) {
- fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp);
- }
- if (g_Dbs.db[i].dbCfg.walLevel > 0) {
- fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
- }
- if (g_Dbs.db[i].dbCfg.fsync > 0) {
- fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
- }
- if (g_Dbs.db[i].dbCfg.quorum > 0) {
- fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
- }
- if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
- || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
- fprintf(fp, " precision: %s\n",
- g_Dbs.db[i].dbCfg.precision);
- } else {
- fprintf(fp, " precision error: %s\n",
- g_Dbs.db[i].dbCfg.precision);
- }
- }
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ fprintf(fp, "database[%d]:\n", i);
+ fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ fprintf(fp, " drop: no\n");
+ }else {
+ fprintf(fp, " drop: yes\n");
+ }
- fprintf(fp, " super table count: %"PRIu64"\n",
- g_Dbs.db[i].superTblCount);
- for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- fprintf(fp, " super table[%d]:\n", j);
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ fprintf(fp, " precision: %s\n",
+ g_Dbs.db[i].dbCfg.precision);
+ } else {
+ fprintf(fp, " precision error: %s\n",
+ g_Dbs.db[i].dbCfg.precision);
+ }
+ }
- fprintf(fp, " stbName: %s\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ fprintf(fp, " super table count: %"PRIu64"\n",
+ g_Dbs.db[i].superTblCount);
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ fprintf(fp, " super table[%d]:\n", j);
- if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- fprintf(fp, " autoCreateTable: %s\n", "no");
- } else if (AUTO_CREATE_SUBTBL
- == g_Dbs.db[i].superTbls[j].autoCreateTable) {
- fprintf(fp, " autoCreateTable: %s\n", "yes");
- } else {
- fprintf(fp, " autoCreateTable: %s\n", "error");
- }
+ fprintf(fp, " stbName: %s\n",
+ g_Dbs.db[i].superTbls[j].sTblName);
- if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
- fprintf(fp, " childTblExists: %s\n", "no");
- } else if (TBL_ALREADY_EXISTS
- == g_Dbs.db[i].superTbls[j].childTblExists) {
- fprintf(fp, " childTblExists: %s\n", "yes");
- } else {
- fprintf(fp, " childTblExists: %s\n", "error");
- }
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "no");
+ } else if (AUTO_CREATE_SUBTBL
+ == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "yes");
+ } else {
+ fprintf(fp, " autoCreateTable: %s\n", "error");
+ }
- fprintf(fp, " childTblCount: %"PRId64"\n",
- g_Dbs.db[i].superTbls[j].childTblCount);
- fprintf(fp, " childTblPrefix: %s\n",
- g_Dbs.db[i].superTbls[j].childTblPrefix);
- fprintf(fp, " dataSource: %s\n",
- g_Dbs.db[i].superTbls[j].dataSource);
- fprintf(fp, " iface: %s\n",
- (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
- (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
- fprintf(fp, " insertRows: %"PRId64"\n",
- g_Dbs.db[i].superTbls[j].insertRows);
- fprintf(fp, " interlace rows: %u\n",
- g_Dbs.db[i].superTbls[j].interlaceRows);
- if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- fprintf(fp, " stable insert interval: %"PRIu64"\n",
- g_Dbs.db[i].superTbls[j].insertInterval);
- }
-/*
- if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- fprintf(fp, " multiThreadWriteOneTbl: no\n");
- }else {
- fprintf(fp, " multiThreadWriteOneTbl: yes\n");
- }
- */
- fprintf(fp, " interlaceRows: %u\n",
- g_Dbs.db[i].superTbls[j].interlaceRows);
- fprintf(fp, " disorderRange: %d\n",
- g_Dbs.db[i].superTbls[j].disorderRange);
- fprintf(fp, " disorderRatio: %d\n",
- g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %"PRIu64"\n",
- g_Dbs.db[i].superTbls[j].maxSqlLen);
-
- fprintf(fp, " timeStampStep: %"PRId64"\n",
- g_Dbs.db[i].superTbls[j].timeStampStep);
- fprintf(fp, " startTimestamp: %s\n",
- g_Dbs.db[i].superTbls[j].startTimestamp);
- fprintf(fp, " sampleFormat: %s\n",
- g_Dbs.db[i].superTbls[j].sampleFormat);
- fprintf(fp, " sampleFile: %s\n",
- g_Dbs.db[i].superTbls[j].sampleFile);
- fprintf(fp, " tagsFile: %s\n",
- g_Dbs.db[i].superTbls[j].tagsFile);
-
- fprintf(fp, " columnCount: %d\n ",
- g_Dbs.db[i].superTbls[j].columnCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- if ((0 == strncasecmp(
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- "nchar", strlen("nchar")))) {
- fprintf(fp, "column[%d]:%s(%d) ", k,
- g_Dbs.db[i].superTbls[j].columns[k].dataType,
- g_Dbs.db[i].superTbls[j].columns[k].dataLen);
- } else {
- fprintf(fp, "column[%d]:%s ",
- k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
- }
- }
- fprintf(fp, "\n");
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "no");
+ } else if (TBL_ALREADY_EXISTS
+ == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "yes");
+ } else {
+ fprintf(fp, " childTblExists: %s\n", "error");
+ }
- fprintf(fp, " tagCount: %d\n ",
- g_Dbs.db[i].superTbls[j].tagCount);
- for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
- //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "binary", strlen("binary")))
- || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
- "nchar", strlen("nchar")))) {
- fprintf(fp, "tag[%d]:%s(%d) ",
- k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
- g_Dbs.db[i].superTbls[j].tags[k].dataLen);
- } else {
- fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ fprintf(fp, " childTblCount: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].childTblCount);
+ fprintf(fp, " childTblPrefix: %s\n",
+ g_Dbs.db[i].superTbls[j].childTblPrefix);
+ fprintf(fp, " dataSource: %s\n",
+ g_Dbs.db[i].superTbls[j].dataSource);
+ fprintf(fp, " iface: %s\n",
+ (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
+ fprintf(fp, " insertRows: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+ fprintf(fp, " interlace rows: %u\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+ if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
+ g_Dbs.db[i].superTbls[j].insertInterval);
+ }
+ /*
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ fprintf(fp, " multiThreadWriteOneTbl: no\n");
+ }else {
+ fprintf(fp, " multiThreadWriteOneTbl: yes\n");
+ }
+ */
+ fprintf(fp, " interlaceRows: %u\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+ fprintf(fp, " disorderRange: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRange);
+ fprintf(fp, " disorderRatio: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
+
+ fprintf(fp, " timeStampStep: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
+ fprintf(fp, " startTimestamp: %s\n",
+ g_Dbs.db[i].superTbls[j].startTimestamp);
+ fprintf(fp, " sampleFormat: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFormat);
+ fprintf(fp, " sampleFile: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFile);
+ fprintf(fp, " tagsFile: %s\n",
+ g_Dbs.db[i].superTbls[j].tagsFile);
+
+ fprintf(fp, " columnCount: %d\n ",
+ g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", strlen("nchar")))) {
+ fprintf(fp, "column[%d]:%s(%d) ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ fprintf(fp, "column[%d]:%s ",
+ k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
+
+ fprintf(fp, " tagCount: %d\n ",
+ g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
+ fprintf(fp, "tag[%d]:%s(%d) ",
+ k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
}
- }
- fprintf(fp, "\n");
+ fprintf(fp, "\n");
}
- fprintf(fp, "\n");
- }
- SHOW_PARSE_RESULT_END_TO_FILE(fp);
+ SHOW_PARSE_RESULT_END_TO_FILE(fp);
}
static void printfQueryMeta() {
- SHOW_PARSE_RESULT_START();
-
- printf("host: \033[33m%s:%u\033[0m\n",
- g_queryInfo.host, g_queryInfo.port);
- printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
- printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
-
- printf("\n");
-
- if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
- printf("specified table query info: \n");
- printf("sqlCount: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.sqlCount);
- if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
- printf("specified tbl query times:\n");
- printf(" \033[33m%"PRIu64"\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryTimes);
- printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
- g_queryInfo.specifiedQueryInfo.queryInterval);
- printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.concurrent);
- printf("mod: \033[33m%s\033[0m\n",
- (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
- printf("interval: \033[33m%"PRIu64"\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
-
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.specifiedQueryInfo.sql[i]);
- }
- printf("\n");
- }
-
- printf("super table query info:\n");
- printf("sqlCount: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.sqlCount);
-
- if (g_queryInfo.superQueryInfo.sqlCount > 0) {
- printf("query interval: \033[33m%"PRIu64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryInterval);
- printf("threadCnt: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
- g_queryInfo.superQueryInfo.childTblCount);
- printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
- g_queryInfo.superQueryInfo.queryTimes);
-
- printf("mod: \033[33m%s\033[0m\n",
- (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
- printf("interval: \033[33m%"PRIu64"\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeInterval);
- printf("restart: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeRestart);
- printf("keepProgress: \033[33m%d\033[0m\n",
- g_queryInfo.superQueryInfo.subscribeKeepProgress);
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- printf(" sql[%d]: \033[33m%s\033[0m\n",
- i, g_queryInfo.superQueryInfo.sql[i]);
- }
- printf("\n");
+ SHOW_PARSE_RESULT_START();
+
+ printf("host: \033[33m%s:%u\033[0m\n",
+ g_queryInfo.host, g_queryInfo.port);
+ printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
+ printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
+
+ printf("\n");
+
+ if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
+ printf("specified table query info: \n");
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
+ printf("specified tbl query times:\n");
+ printf(" \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryTimes);
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryInterval);
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
+
+ printf("super table query info:\n");
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.sqlCount);
+
+ if (g_queryInfo.superQueryInfo.sqlCount > 0) {
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryInterval);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.sTblName);
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
+
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
}
- }
- SHOW_PARSE_RESULT_END();
+ SHOW_PARSE_RESULT_END();
}
static char* formatTimestamp(char* buf, int64_t val, int precision) {
- time_t tt;
- if (precision == TSDB_TIME_PRECISION_NANO) {
- tt = (time_t)(val / 1000000000);
- } else if (precision == TSDB_TIME_PRECISION_MICRO) {
- tt = (time_t)(val / 1000000);
- } else {
- tt = (time_t)(val / 1000);
- }
+ time_t tt;
+ if (precision == TSDB_TIME_PRECISION_NANO) {
+ tt = (time_t)(val / 1000000000);
+ } else if (precision == TSDB_TIME_PRECISION_MICRO) {
+ tt = (time_t)(val / 1000000);
+ } else {
+ tt = (time_t)(val / 1000);
+ }
-/* comment out as it make testcases like select_with_tags.sim fail.
- but in windows, this may cause the call to localtime crash if tt < 0,
- need to find a better solution.
- if (tt < 0) {
- tt = 0;
- }
- */
+ /* Commented out because it makes test cases such as select_with_tags.sim fail;
+ however, on Windows the call to localtime may crash when tt < 0,
+ so a better solution is still needed.
+ if (tt < 0) {
+ tt = 0;
+ }
+ */
#ifdef WINDOWS
- if (tt < 0) tt = 0;
+ if (tt < 0) tt = 0;
#endif
- struct tm* ptm = localtime(&tt);
- size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
+ struct tm* ptm = localtime(&tt);
+ size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
- if (precision == TSDB_TIME_PRECISION_NANO) {
- sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
- } else if (precision == TSDB_TIME_PRECISION_MICRO) {
- sprintf(buf + pos, ".%06d", (int)(val % 1000000));
- } else {
- sprintf(buf + pos, ".%03d", (int)(val % 1000));
- }
+ if (precision == TSDB_TIME_PRECISION_NANO) {
+ sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
+ } else if (precision == TSDB_TIME_PRECISION_MICRO) {
+ sprintf(buf + pos, ".%06d", (int)(val % 1000000));
+ } else {
+ sprintf(buf + pos, ".%03d", (int)(val % 1000));
+ }
- return buf;
+ return buf;
}
static void xDumpFieldToFile(FILE* fp, const char* val,
TAOS_FIELD* field, int32_t length, int precision) {
- if (val == NULL) {
- fprintf(fp, "%s", TSDB_DATA_NULL_STR);
- return;
- }
+ if (val == NULL) {
+ fprintf(fp, "%s", TSDB_DATA_NULL_STR);
+ return;
+ }
- char buf[TSDB_MAX_BYTES_PER_ROW];
- switch (field->type) {
- case TSDB_DATA_TYPE_BOOL:
- fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- fprintf(fp, "%d", *((int8_t *)val));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- fprintf(fp, "%d", *((int16_t *)val));
- break;
- case TSDB_DATA_TYPE_INT:
- fprintf(fp, "%d", *((int32_t *)val));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- fprintf(fp, "%" PRId64, *((int64_t *)val));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
- break;
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- memcpy(buf, val, length);
- buf[length] = 0;
- fprintf(fp, "\'%s\'", buf);
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- formatTimestamp(buf, *(int64_t*)val, precision);
- fprintf(fp, "'%s'", buf);
- break;
- default:
- break;
- }
+ char buf[TSDB_MAX_BYTES_PER_ROW];
+ switch (field->type) {
+ case TSDB_DATA_TYPE_BOOL:
+ fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ fprintf(fp, "%d", *((int8_t *)val));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ fprintf(fp, "%d", *((int16_t *)val));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ fprintf(fp, "%d", *((int32_t *)val));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ fprintf(fp, "%" PRId64, *((int64_t *)val));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ memcpy(buf, val, length);
+ buf[length] = 0;
+ fprintf(fp, "\'%s\'", buf);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ formatTimestamp(buf, *(int64_t*)val, precision);
+ fprintf(fp, "'%s'", buf);
+ break;
+ default:
+ break;
+ }
}
static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
- TAOS_ROW row = taos_fetch_row(tres);
- if (row == NULL) {
- return 0;
- }
-
- FILE* fp = fopen(fname, "at");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file: %s\n", __func__, __LINE__, fname);
- return -1;
- }
-
- int num_fields = taos_num_fields(tres);
- TAOS_FIELD *fields = taos_fetch_fields(tres);
- int precision = taos_result_precision(tres);
+ TAOS_ROW row = taos_fetch_row(tres);
+ if (row == NULL) {
+ return 0;
+ }
- for (int col = 0; col < num_fields; col++) {
- if (col > 0) {
- fprintf(fp, ",");
+ FILE* fp = fopen(fname, "at");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file: %s\n",
+ __func__, __LINE__, fname);
+ return -1;
}
- fprintf(fp, "%s", fields[col].name);
- }
- fputc('\n', fp);
- int numOfRows = 0;
- do {
- int32_t* length = taos_fetch_lengths(tres);
- for (int i = 0; i < num_fields; i++) {
- if (i > 0) {
- fputc(',', fp);
- }
- xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision);
+ int num_fields = taos_num_fields(tres);
+ TAOS_FIELD *fields = taos_fetch_fields(tres);
+ int precision = taos_result_precision(tres);
+
+ for (int col = 0; col < num_fields; col++) {
+ if (col > 0) {
+ fprintf(fp, ",");
+ }
+ fprintf(fp, "%s", fields[col].name);
}
fputc('\n', fp);
- numOfRows++;
- row = taos_fetch_row(tres);
- } while( row != NULL);
+ int numOfRows = 0;
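+ // write each fetched row as one comma-separated line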
+ do {
+ int32_t* length = taos_fetch_lengths(tres);
+ for (int i = 0; i < num_fields; i++) {
+ if (i > 0) {
+ fputc(',', fp);
+ }
+ xDumpFieldToFile(fp,
+ (const char*)row[i], fields +i, length[i], precision);
+ }
+ fputc('\n', fp);
+
+ numOfRows++;
+ row = taos_fetch_row(tres);
+ } while( row != NULL);
- fclose(fp);
+ fclose(fp);
- return numOfRows;
+ return numOfRows;
}
static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
- TAOS_RES * res;
- TAOS_ROW row = NULL;
- int count = 0;
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+ int count = 0;
- res = taos_query(taos, "show databases;");
- int32_t code = taos_errno(res);
+ res = taos_query(taos, "show databases;");
+ int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint( "failed to run , reason: %s\n", taos_errstr(res));
- return -1;
- }
+ if (code != 0) {
+ errorPrint( "failed to run , reason: %s\n",
+ taos_errstr(res));
+ return -1;
+ }
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
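+ // iterate over the "show databases" result, skipping the system database 'log'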
- while((row = taos_fetch_row(res)) != NULL) {
- // sys database name : 'log'
- if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
- continue;
- }
+ while((row = taos_fetch_row(res)) != NULL) {
+ // sys database name : 'log'
+ if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
+ continue;
+ }
- dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (dbInfos[count] == NULL) {
- errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count);
- return -1;
- }
+ dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (dbInfos[count] == NULL) {
+ errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count);
+ return -1;
+ }
- tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
- formatTimestamp(dbInfos[count]->create_time,
- *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
- TSDB_TIME_PRECISION_MILLI);
- dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
- dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- dbInfos[count]->cachelast =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- tstrncpy(dbInfos[count]->precision,
- (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
- dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX],
- fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+ tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ formatTimestamp(dbInfos[count]->create_time,
+ *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
+ TSDB_TIME_PRECISION_MILLI);
+ dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ tstrncpy(dbInfos[count]->precision,
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+ tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX],
+ fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
- count++;
- if (count > MAX_DATABASE_COUNT) {
- errorPrint("%s() LN%d, The database count overflow than %d\n",
- __func__, __LINE__, MAX_DATABASE_COUNT);
- break;
+ count++;
+ if (count >= MAX_DATABASE_COUNT) {
+ errorPrint("%s() LN%d, the database count exceeds the maximum of %d\n",
+ __func__, __LINE__, MAX_DATABASE_COUNT);
+ break;
+ }
}
- }
- return count;
+ return count;
}
static void printfDbInfoForQueryToFile(
char* filename, SDbInfo* dbInfos, int index) {
- if (filename[0] == 0)
- return;
+ if (filename[0] == 0)
+ return;
- FILE *fp = fopen(filename, "at");
- if (fp == NULL) {
- errorPrint( "failed to open file: %s\n", filename);
- return;
- }
+ FILE *fp = fopen(filename, "at");
+ if (fp == NULL) {
+ errorPrint( "failed to open file: %s\n", filename);
+ return;
+ }
- fprintf(fp, "================ database[%d] ================\n", index);
- fprintf(fp, "name: %s\n", dbInfos->name);
- fprintf(fp, "created_time: %s\n", dbInfos->create_time);
- fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables);
- fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
- fprintf(fp, "replica: %d\n", dbInfos->replica);
- fprintf(fp, "quorum: %d\n", dbInfos->quorum);
- fprintf(fp, "days: %d\n", dbInfos->days);
- fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
- fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
- fprintf(fp, "blocks: %d\n", dbInfos->blocks);
- fprintf(fp, "minrows: %d\n", dbInfos->minrows);
- fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
- fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
- fprintf(fp, "fsync: %d\n", dbInfos->fsync);
- fprintf(fp, "comp: %d\n", dbInfos->comp);
- fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
- fprintf(fp, "precision: %s\n", dbInfos->precision);
- fprintf(fp, "update: %d\n", dbInfos->update);
- fprintf(fp, "status: %s\n", dbInfos->status);
- fprintf(fp, "\n");
+ fprintf(fp, "================ database[%d] ================\n", index);
+ fprintf(fp, "name: %s\n", dbInfos->name);
+ fprintf(fp, "created_time: %s\n", dbInfos->create_time);
+ fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables);
+ fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
+ fprintf(fp, "replica: %d\n", dbInfos->replica);
+ fprintf(fp, "quorum: %d\n", dbInfos->quorum);
+ fprintf(fp, "days: %d\n", dbInfos->days);
+ fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
+ fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
+ fprintf(fp, "blocks: %d\n", dbInfos->blocks);
+ fprintf(fp, "minrows: %d\n", dbInfos->minrows);
+ fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
+ fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
+ fprintf(fp, "fsync: %d\n", dbInfos->fsync);
+ fprintf(fp, "comp: %d\n", dbInfos->comp);
+ fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
+ fprintf(fp, "precision: %s\n", dbInfos->precision);
+ fprintf(fp, "update: %d\n", dbInfos->update);
+ fprintf(fp, "status: %s\n", dbInfos->status);
+ fprintf(fp, "\n");
- fclose(fp);
+ fclose(fp);
}
static void printfQuerySystemInfo(TAOS * taos) {
- char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
- char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
- TAOS_RES* res;
-
- time_t t;
- struct tm* lt;
- time(&t);
- lt = localtime(&t);
- snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
- lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
- lt->tm_sec);
-
- // show variables
- res = taos_query(taos, "show variables;");
- //fetchResult(res, filename);
- xDumpResultToFile(filename, res);
-
- // show dnodes
- res = taos_query(taos, "show dnodes;");
- xDumpResultToFile(filename, res);
- //fetchResult(res, filename);
-
- // show databases
- res = taos_query(taos, "show databases;");
- SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
- if (dbInfos == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
- return;
- }
- int dbCount = getDbFromServer(taos, dbInfos);
- if (dbCount <= 0) {
- free(dbInfos);
- return;
- }
-
- for (int i = 0; i < dbCount; i++) {
- // printf database info
- printfDbInfoForQueryToFile(filename, dbInfos[i], i);
-
- // show db.vgroups
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
- res = taos_query(taos, buffer);
+ char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
+ char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+ TAOS_RES* res;
+
+ time_t t;
+ struct tm* lt;
+ time(&t);
+ lt = localtime(&t);
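+ // build the output filename from the current local time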
+ snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
+ lt->tm_year+1900, lt->tm_mon+1, lt->tm_mday, lt->tm_hour, lt->tm_min,
+ lt->tm_sec);
+
+ // show variables
+ res = taos_query(taos, "show variables;");
+ //fetchResult(res, filename);
xDumpResultToFile(filename, res);
- // show db.stables
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
- res = taos_query(taos, buffer);
+ // show dnodes
+ res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
+ //fetchResult(res, filename);
- free(dbInfos[i]);
- }
+ // show databases
+ res = taos_query(taos, "show databases;");
+ SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
+ if (dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ return;
+ }
+ int dbCount = getDbFromServer(taos, dbInfos);
+ if (dbCount <= 0) {
+ free(dbInfos);
+ return;
+ }
+
+ for (int i = 0; i < dbCount; i++) {
+ // printf database info
+ printfDbInfoForQueryToFile(filename, dbInfos[i], i);
+
+ // show db.vgroups
+ snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
- free(dbInfos);
+ // show db.stables
+ snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
+
+ free(dbInfos[i]);
+ }
+
+ free(dbInfos);
}
static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
@@ -2283,98 +2340,107 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
}
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
- char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
- if (NULL == dataBuf) {
- errorPrint("%s() LN%d, calloc failed! size:%d\n",
- __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
- return NULL;
- }
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ errorPrint("%s() LN%d, calloc failed! size:%d\n",
+ __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
+ return NULL;
+ }
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
- return dataBuf;
+ return dataBuf;
}
static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) {
- char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
- if (NULL == dataBuf) {
- printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
- return NULL;
- }
-
- int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
- for (int i = 0; i < stbInfo->tagCount; i++) {
- if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", strlen("binary")))
- || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) {
- if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
- printf("binary or nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- tmfree(dataBuf);
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
return NULL;
- }
+ }
- int tagBufLen = stbInfo->tags[i].dataLen + 1;
- char* buf = (char*)calloc(tagBufLen, 1);
- if (NULL == buf) {
- printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen);
- tmfree(dataBuf);
- return NULL;
- }
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
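+ // emit one literal value per tag; binary/nchar tags alternate between 'beijing' and 'shanghai'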
+ for (int i = 0; i < stbInfo->tagCount; i++) {
+ if ((0 == strncasecmp(stbInfo->tags[i].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "nchar", strlen("nchar")))) {
+ if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
+ printf("binary or nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ tmfree(dataBuf);
+ return NULL;
+ }
- if (tableSeq % 2) {
- tstrncpy(buf, "beijing", tagBufLen);
- } else {
- tstrncpy(buf, "shanghai", tagBufLen);
- }
- //rand_string(buf, stbInfo->tags[i].dataLen);
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "\'%s\', ", buf);
- tmfree(buf);
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "int", strlen("int"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", tableSeq);
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "bigint", strlen("bigint"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64", ", rand_bigint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "float", strlen("float"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%f, ", rand_float());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "double", strlen("double"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%f, ", rand_double());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "smallint", strlen("smallint"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "tinyint", strlen("tinyint"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "bool", strlen("bool"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%d, ", rand_bool());
- } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
- "timestamp", strlen("timestamp"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64", ", rand_bigint());
- } else {
- printf("No support data type: %s\n", stbInfo->tags[i].dataType);
- tmfree(dataBuf);
- return NULL;
+ int tagBufLen = stbInfo->tags[i].dataLen + 1;
+ char* buf = (char*)calloc(tagBufLen, 1);
+ if (NULL == buf) {
+ printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen);
+ tmfree(dataBuf);
+ return NULL;
+ }
+
+ if (tableSeq % 2) {
+ tstrncpy(buf, "beijing", tagBufLen);
+ } else {
+ tstrncpy(buf, "shanghai", tagBufLen);
+ }
+ //rand_string(buf, stbInfo->tags[i].dataLen);
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "\'%s\', ", buf);
+ tmfree(buf);
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "int", strlen("int"))) {
+ if ((g_args.demo_mode) && (i == 0)) {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", tableSeq % 10);
+ } else {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", tableSeq);
+ }
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "bigint", strlen("bigint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64", ", rand_bigint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "float", strlen("float"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%f, ", rand_float());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "double", strlen("double"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%f, ", rand_double());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "smallint", strlen("smallint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", rand_smallint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "tinyint", strlen("tinyint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", rand_tinyint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "bool", strlen("bool"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", rand_bool());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "timestamp", strlen("timestamp"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64", ", rand_bigint());
+ } else {
+ printf("No support data type: %s\n", stbInfo->tags[i].dataType);
+ tmfree(dataBuf);
+ return NULL;
+ }
}
- }
- dataLen -= 2;
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")");
- return dataBuf;
+ dataLen -= 2;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")");
+ return dataBuf;
}
static int calcRowLen(SSuperTable* superTbls) {
@@ -2573,7 +2639,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
superTbls->tags[tagIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->tags[tagIndex].note,
@@ -2586,7 +2652,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
superTbls->columns[columnIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->columns[columnIndex].note,
@@ -2612,168 +2678,195 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
return -1;
}
- getAllChildNameOfSuperTable(taos, dbName,
- superTbls->sTblName,
- &superTbls->childTblName,
- &superTbls->childTblCount);
- }
- */
- return 0;
-}
-
-static int createSuperTable(
- TAOS * taos, char* dbName,
- SSuperTable* superTbl) {
-
- char command[BUFFER_SIZE] = "\0";
-
- char cols[STRING_LEN] = "\0";
- int colIndex;
- int len = 0;
-
- int lenOfOneRow = 0;
-
- if (superTbl->columnCount == 0) {
- errorPrint("%s() LN%d, super table column count is %d\n",
- __func__, __LINE__, superTbl->columnCount);
- return -1;
- }
-
- for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
- char* dataType = superTbl->columns[colIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(cols + len, STRING_LEN - len,
- ", col%d %s(%d)", colIndex, "BINARY",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(cols + len, STRING_LEN - len,
- ", col%d %s(%d)", colIndex, "NCHAR",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
- lenOfOneRow += 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT");
- lenOfOneRow += 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT");
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
- lenOfOneRow += 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
- lenOfOneRow += 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE");
- lenOfOneRow += 42;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP");
- lenOfOneRow += 21;
- } else {
- taos_close(taos);
- errorPrint("%s() LN%d, config error data type : %s\n",
- __func__, __LINE__, dataType);
- exit(-1);
- }
+ getAllChildNameOfSuperTable(taos, dbName,
+ superTbls->sTblName,
+ &superTbls->childTblName,
+ &superTbls->childTblCount);
}
+ */
+ return 0;
+}
- superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp
- //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbl[j].sTblName, g_Dbs.db[i].superTbl[j].columnCount, lenOfOneRow);
+static int createSuperTable(
+ TAOS * taos, char* dbName,
+ SSuperTable* superTbl) {
- // save for creating child table
- superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
- if (NULL == superTbl->colsOfCreateChildTable) {
- errorPrint("%s() LN%d, Failed when calloc, size:%d",
- __func__, __LINE__, len+1);
- taos_close(taos);
- exit(-1);
- }
+ char command[BUFFER_SIZE] = "\0";
- snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
- verbosePrint("%s() LN%d: %s\n",
- __func__, __LINE__, superTbl->colsOfCreateChildTable);
+ char cols[STRING_LEN] = "\0";
+ int colIndex;
+ int len = 0;
- if (superTbl->tagCount == 0) {
- errorPrint("%s() LN%d, super table tag count is %d\n",
- __func__, __LINE__, superTbl->tagCount);
- return -1;
- }
+ int lenOfOneRow = 0;
- char tags[STRING_LEN] = "\0";
- int tagIndex;
- len = 0;
+ if (superTbl->columnCount == 0) {
+ errorPrint("%s() LN%d, super table column count is %d\n",
+ __func__, __LINE__, superTbl->columnCount);
+ return -1;
+ }
- int lenOfTagOfOneRow = 0;
- len += snprintf(tags + len, STRING_LEN - len, "(");
- for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) {
- char* dataType = superTbl->tags[tagIndex].dataType;
+ for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
+ char* dataType = superTbl->columns[colIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len,
+ ", col%d %s(%d)", colIndex, "BINARY",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len,
+ ", col%d %s(%d)", colIndex, "NCHAR",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ if ((g_args.demo_mode) && (colIndex == 1)) {
+ len += snprintf(cols + len, STRING_LEN - len,
+ ", VOLTAGE INT");
+ } else {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
+ }
+ lenOfOneRow += 11;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
+ colIndex, "BIGINT");
+ lenOfOneRow += 21;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
+ colIndex, "SMALLINT");
+ lenOfOneRow += 6;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
+ lenOfOneRow += 4;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
+ lenOfOneRow += 6;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ if (g_args.demo_mode) {
+ if (colIndex == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", CURRENT FLOAT");
+ } else if (colIndex == 2) {
+ len += snprintf(cols + len, STRING_LEN - len, ", PHASE FLOAT");
+ }
+ } else {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
+ }
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
- "BINARY", superTbl->tags[tagIndex].dataLen);
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
- "NCHAR", superTbl->tags[tagIndex].dataLen);
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "INT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "BIGINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "SMALLINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "TINYINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "BOOL");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "FLOAT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
- "DOUBLE");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
- } else {
- taos_close(taos);
- errorPrint("%s() LN%d, config error tag type : %s\n",
- __func__, __LINE__, dataType);
- exit(-1);
+ lenOfOneRow += 22;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
+ colIndex, "DOUBLE");
+ lenOfOneRow += 42;
+ } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
+ colIndex, "TIMESTAMP");
+ lenOfOneRow += 21;
+ } else {
+ taos_close(taos);
+ errorPrint("%s() LN%d, config error data type : %s\n",
+ __func__, __LINE__, dataType);
+ exit(-1);
+ }
}
- }
- len -= 2;
- len += snprintf(tags + len, STRING_LEN - len, ")");
+ superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp
- superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
+ // save for creating child table
+ superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
+ if (NULL == superTbl->colsOfCreateChildTable) {
+ errorPrint("%s() LN%d, Failed when calloc, size:%d",
+ __func__, __LINE__, len+1);
+ taos_close(taos);
+ exit(-1);
+ }
- snprintf(command, BUFFER_SIZE,
- "create table if not exists %s.%s (ts timestamp%s) tags %s",
- dbName, superTbl->sTblName, cols, tags);
- if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
- errorPrint( "create supertable %s failed!\n\n",
- superTbl->sTblName);
- return -1;
- }
- debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
- return 0;
+ snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
+ verbosePrint("%s() LN%d: %s\n",
+ __func__, __LINE__, superTbl->colsOfCreateChildTable);
+
+ if (superTbl->tagCount == 0) {
+ errorPrint("%s() LN%d, super table tag count is %d\n",
+ __func__, __LINE__, superTbl->tagCount);
+ return -1;
+ }
+
+ char tags[STRING_LEN] = "\0";
+ int tagIndex;
+ len = 0;
+
+ int lenOfTagOfOneRow = 0;
+ len += snprintf(tags + len, STRING_LEN - len, "(");
+ for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) {
+ char* dataType = superTbl->tags[tagIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ if ((g_args.demo_mode) && (tagIndex == 1)) {
+ len += snprintf(tags + len, STRING_LEN - len,
+ "loction BINARY(%d), ",
+ superTbl->tags[tagIndex].dataLen);
+ } else {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
+ tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen);
+ }
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
+ "NCHAR", superTbl->tags[tagIndex].dataLen);
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ if ((g_args.demo_mode) && (tagIndex == 0)) {
+ len += snprintf(tags + len, STRING_LEN - len, "groupId INT, ");
+ } else {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "INT");
+ }
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "BIGINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "SMALLINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "TINYINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "BOOL");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "FLOAT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ "DOUBLE");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
+ } else {
+ taos_close(taos);
+ errorPrint("%s() LN%d, config error tag type : %s\n",
+ __func__, __LINE__, dataType);
+ exit(-1);
+ }
+ }
+
+ len -= 2;
+ len += snprintf(tags + len, STRING_LEN - len, ")");
+
+ superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
+
+ snprintf(command, BUFFER_SIZE,
+ "create table if not exists %s.%s (ts timestamp%s) tags %s",
+ dbName, superTbl->sTblName, cols, tags);
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
+ errorPrint( "create supertable %s failed!\n\n",
+ superTbl->sTblName);
+ return -1;
+ }
+ debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
+ return 0;
}
static int createDatabasesAndStables() {
@@ -3010,7 +3103,7 @@ static int startMultiThreadCreateChildTable(
char* cols, int threads, uint64_t tableFrom, int64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
@@ -3205,13 +3298,6 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
return 0;
}
-#if 0
-int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
- // TODO
- return 0;
-}
-#endif
-
/*
Read 10000 lines at most. If more than 10000 lines, continue to read after using
*/
@@ -3288,9 +3374,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
int columnSize = cJSON_GetArraySize(columns);
- if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
+ if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
@@ -3346,9 +3432,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
}
- if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, MAX_NUM_COLUMNS);
goto PARSE_OVER;
}
@@ -3365,9 +3451,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
int tagSize = cJSON_GetArraySize(tags);
- if (tagSize > MAX_TAG_COUNT) {
+ if (tagSize > TSDB_MAX_TAGS) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, TSDB_MAX_TAGS);
goto PARSE_OVER;
}
@@ -3417,17 +3503,17 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
}
- if (index > MAX_TAG_COUNT) {
+ if (index > TSDB_MAX_TAGS) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, TSDB_MAX_TAGS);
goto PARSE_OVER;
}
superTbls->tagCount = index;
- if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
ret = true;
@@ -4394,17 +4480,19 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+ // default value is -1, which means an infinite loop
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
cJSON* endAfterConsume =
cJSON_GetObjectItem(specifiedQuery, "endAfterConsume");
if (endAfterConsume
&& endAfterConsume->type == cJSON_Number) {
g_queryInfo.specifiedQueryInfo.endAfterConsume[j]
= endAfterConsume->valueint;
- } else if (!endAfterConsume) {
- // default value is -1, which mean infinite loop
- g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
}
+ if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
cJSON* resubAfterConsume =
cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
if ((resubAfterConsume)
@@ -4412,11 +4500,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
&& (resubAfterConsume->valueint >= 0)) {
g_queryInfo.specifiedQueryInfo.resubAfterConsume[j]
= resubAfterConsume->valueint;
- } else if (!resubAfterConsume) {
- // default value is -1, which mean do not resub
- g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
}
+ if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
+
cJSON *result = cJSON_GetObjectItem(sql, "result");
if ((NULL != result) && (result->type == cJSON_String)
&& (result->valuestring != NULL)) {
@@ -4558,17 +4646,20 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
}
+ // default value is -1, which means consume indefinitely
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
cJSON* superEndAfterConsume =
cJSON_GetObjectItem(superQuery, "endAfterConsume");
if (superEndAfterConsume
&& superEndAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.endAfterConsume =
superEndAfterConsume->valueint;
- } else if (!superEndAfterConsume) {
- // default value is -1, which mean do not resub
- g_queryInfo.superQueryInfo.endAfterConsume = -1;
}
+ if (g_queryInfo.superQueryInfo.endAfterConsume < -1)
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
+ // default value is -1, which means do not resubscribe
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
cJSON* superResubAfterConsume =
cJSON_GetObjectItem(superQuery, "resubAfterConsume");
if ((superResubAfterConsume)
@@ -4576,10 +4667,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
&& (superResubAfterConsume->valueint >= 0)) {
g_queryInfo.superQueryInfo.resubAfterConsume =
superResubAfterConsume->valueint;
- } else if (!superResubAfterConsume) {
- // default value is -1, which mean do not resub
- g_queryInfo.superQueryInfo.resubAfterConsume = -1;
}
+ if (g_queryInfo.superQueryInfo.resubAfterConsume < -1)
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
// supert table sqls
cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
@@ -4698,14 +4788,18 @@ PARSE_OVER:
return ret;
}
-static void prepareSampleData() {
+static int prepareSampleData() {
for (int i = 0; i < g_Dbs.dbCount; i++) {
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) {
- (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]);
+ if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) {
+ return -1;
+ }
}
}
}
+
+ return 0;
}
static void postFreeResource() {
@@ -4734,105 +4828,124 @@ static void postFreeResource() {
static int getRowDataFromSample(
char* dataBuf, int64_t maxLen, int64_t timestamp,
- SSuperTable* superTblInfo, int64_t* sampleUsePos) {
- if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
-/* int ret = readSampleFromCsvFileToMem(superTblInfo);
- if (0 != ret) {
- tmfree(superTblInfo->sampleDataBuf);
- superTblInfo->sampleDataBuf = NULL;
- return -1;
+ SSuperTable* superTblInfo, int64_t* sampleUsePos)
+{
+ if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
+ /* int ret = readSampleFromCsvFileToMem(superTblInfo);
+ if (0 != ret) {
+ tmfree(superTblInfo->sampleDataBuf);
+ superTblInfo->sampleDataBuf = NULL;
+ return -1;
+ }
+ */
+ *sampleUsePos = 0;
}
-*/
- *sampleUsePos = 0;
- }
- int dataLen = 0;
+ int dataLen = 0;
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
- "(%" PRId64 ", ", timestamp);
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
- "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos));
- dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
+ "(%" PRId64 ", ", timestamp);
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
+ "%s",
+ superTblInfo->sampleDataBuf
+ + superTblInfo->lenOfOneRow * (*sampleUsePos));
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
- (*sampleUsePos)++;
+ (*sampleUsePos)++;
- return dataLen;
+ return dataLen;
}
static int64_t generateStbRowData(
SSuperTable* stbInfo,
char* recBuf, int64_t timestamp)
{
- int64_t dataLen = 0;
- char *pstr = recBuf;
- int64_t maxLen = MAX_DATA_SIZE;
-
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "(%" PRId64 ",", timestamp);
-
- for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType,
- "BINARY", strlen("BINARY")))
- || (0 == strncasecmp(stbInfo->columns[i].dataType,
- "NCHAR", strlen("NCHAR")))) {
- if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary or nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
+ int64_t dataLen = 0;
+ char *pstr = recBuf;
+ int64_t maxLen = MAX_DATA_SIZE;
+
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "(%" PRId64 ",", timestamp);
+
+ for (int i = 0; i < stbInfo->columnCount; i++) {
+ if ((0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BINARY", strlen("BINARY")))
+ || (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "NCHAR", strlen("NCHAR")))) {
+ if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint( "binary or nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
- char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
- if (NULL == buf) {
- errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
- return -1;
- }
- rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
- tmfree(buf);
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "INT", strlen("INT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_int());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BIGINT", strlen("BIGINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64",", rand_bigint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "FLOAT", strlen("FLOAT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", rand_float());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "DOUBLE", strlen("DOUBLE"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", rand_double());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "SMALLINT", strlen("SMALLINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TINYINT", strlen("TINYINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BOOL", strlen("BOOL"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_bool());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TIMESTAMP", strlen("TIMESTAMP"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64",", rand_bigint());
- } else {
- errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
- return -1;
+ char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
+ if (NULL == buf) {
+ errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+ return -1;
+ }
+ rand_string(buf, stbInfo->columns[i].dataLen);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
+ tmfree(buf);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "INT", strlen("INT"))) {
+ if ((g_args.demo_mode) && (i == 1)) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d,", demo_voltage_int());
+ } else {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d,", rand_int());
+ }
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%"PRId64",", rand_bigint());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ if (g_args.demo_mode) {
+ if (i == 0) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%f,", demo_current_float());
+ } else {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%f,", demo_phase_float());
+ }
+ } else {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%f,", rand_float());
+ }
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%f,", rand_double());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d,", rand_smallint());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d,", rand_tinyint());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BOOL", strlen("BOOL"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%d,", rand_bool());
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "%"PRId64",", rand_bigint());
+ } else {
+ errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType);
+ return -1;
+ }
}
- }
- dataLen -= 1;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
+ dataLen -= 1;
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
- verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
+ verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen);
+ verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
- return strlen(recBuf);
+ return strlen(recBuf);
}
static int64_t generateData(char *recBuf, char **data_type,
@@ -5082,39 +5195,40 @@ static int32_t generateStbDataTail(
} else {
tsRand = false;
}
- verbosePrint("%s() LN%d batch=%u\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n",
+ __func__, __LINE__, batch, remainderBufLen);
int32_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
- int64_t retLen = 0;
+ int64_t lenOfRow = 0;
if (tsRand) {
- retLen = generateStbRowData(superTblInfo, data,
+ lenOfRow = generateStbRowData(superTblInfo, data,
startTime + getTSRandTail(
superTblInfo->timeStampStep, k,
superTblInfo->disorderRatio,
superTblInfo->disorderRange)
);
} else {
- retLen = getRowDataFromSample(
+ lenOfRow = getRowDataFromSample(
data,
- remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
+ (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE,
startTime + superTblInfo->timeStampStep * k,
superTblInfo,
pSamplePos);
}
- if (retLen > remainderBufLen) {
+ if ((lenOfRow + 1) > remainderBufLen) {
break;
}
- pstr += snprintf(pstr , retLen + 1, "%s", data);
+ pstr += snprintf(pstr , lenOfRow + 1, "%s", data);
k++;
- len += retLen;
- remainderBufLen -= retLen;
+ len += lenOfRow;
+ remainderBufLen -= lenOfRow;
verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
@@ -5317,7 +5431,7 @@ static int64_t generateInterlaceDataWithoutStb(
#if STMT_IFACE_ENABLED == 1
static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
- char *dataType, int32_t dataLen, char **ptr)
+ char *dataType, int32_t dataLen, char **ptr, char *value)
{
if (0 == strncasecmp(dataType,
"BINARY", strlen("BINARY"))) {
@@ -5327,12 +5441,18 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
return -1;
}
char *bind_binary = (char *)*ptr;
- rand_string(bind_binary, dataLen);
bind->buffer_type = TSDB_DATA_TYPE_BINARY;
- bind->buffer_length = dataLen;
- bind->buffer = bind_binary;
+ if (value) {
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
+
bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
bind->is_null = NULL;
*ptr += bind->buffer_length;
@@ -5344,9 +5464,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
return -1;
}
char *bind_nchar = (char *)*ptr;
- rand_string(bind_nchar, dataLen);
bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ rand_string(bind_nchar, dataLen);
+ }
+
bind->buffer_length = strlen(bind_nchar);
bind->buffer = bind_nchar;
bind->length = &bind->buffer_length;
@@ -5357,7 +5482,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"INT", strlen("INT"))) {
int32_t *bind_int = (int32_t *)*ptr;
- *bind_int = rand_int();
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
bind->buffer_type = TSDB_DATA_TYPE_INT;
bind->buffer_length = sizeof(int32_t);
bind->buffer = bind_int;
@@ -5369,7 +5498,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"BIGINT", strlen("BIGINT"))) {
int64_t *bind_bigint = (int64_t *)*ptr;
- *bind_bigint = rand_bigint();
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
bind->buffer_length = sizeof(int64_t);
bind->buffer = bind_bigint;
@@ -5381,7 +5514,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"FLOAT", strlen("FLOAT"))) {
float *bind_float = (float *) *ptr;
- *bind_float = rand_float();
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
bind->buffer_length = sizeof(float);
bind->buffer = bind_float;
@@ -5393,7 +5530,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"DOUBLE", strlen("DOUBLE"))) {
double *bind_double = (double *)*ptr;
- *bind_double = rand_double();
+ if (value) {
+ *bind_double = atof(value);
+ } else {
+ *bind_double = rand_double();
+ }
bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
bind->buffer_length = sizeof(double);
bind->buffer = bind_double;
@@ -5405,7 +5546,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"SMALLINT", strlen("SMALLINT"))) {
int16_t *bind_smallint = (int16_t *)*ptr;
- *bind_smallint = rand_smallint();
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
bind->buffer_length = sizeof(int16_t);
bind->buffer = bind_smallint;
@@ -5417,7 +5562,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"TINYINT", strlen("TINYINT"))) {
int8_t *bind_tinyint = (int8_t *)*ptr;
- *bind_tinyint = rand_tinyint();
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
bind->buffer_length = sizeof(int8_t);
bind->buffer = bind_tinyint;
@@ -5440,7 +5589,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"TIMESTAMP", strlen("TIMESTAMP"))) {
int64_t *bind_ts2 = (int64_t *) *ptr;
- *bind_ts2 = rand_bigint();
+ if (value) {
+ *bind_ts2 = atoll(value);
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
bind->buffer_length = sizeof(int64_t);
bind->buffer = bind_ts2;
@@ -5506,12 +5659,13 @@ static int32_t prepareStmtWithoutStb(
ptr += bind->buffer_length;
for (int i = 0; i < g_args.num_of_CPR; i ++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
+ bind = (TAOS_BIND *)((char *)bindArray
+ + (sizeof(TAOS_BIND) * (i + 1)));
if ( -1 == prepareStmtBindArrayByType(
bind,
data_type[i],
g_args.len_of_binary,
- &ptr)) {
+ &ptr, NULL)) {
return -1;
}
}
@@ -5530,12 +5684,14 @@ static int32_t prepareStmtWithoutStb(
return k;
}
-static int32_t prepareStbStmt(SSuperTable *stbInfo,
+static int32_t prepareStbStmt(
+ SSuperTable *stbInfo,
TAOS_STMT *stmt,
char *tableName, uint32_t batch,
uint64_t insertRows,
uint64_t recordFrom,
- int64_t startTime, char *buffer)
+ int64_t startTime,
+ int64_t *pSamplePos)
{
int ret = taos_stmt_set_tbname(stmt, tableName);
if (ret != 0) {
@@ -5546,16 +5702,24 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
- errorPrint("Failed to allocate %d bind params\n",
- (stbInfo->columnCount + 1));
+ errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+ __func__, __LINE__, (stbInfo->columnCount + 1));
return -1;
}
- bool tsRand;
+ bool sourceRand;
if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
- tsRand = true;
+ sourceRand = true;
} else {
- tsRand = false;
+ sourceRand = false; // from sample data file
+ }
+
+ char *bindBuffer = malloc(g_args.len_of_binary);
+ if (bindBuffer == NULL) {
+ errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, g_args.len_of_binary);
+ free(bindArray);
+ return -1;
}
uint32_t k;
@@ -5571,7 +5735,7 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
bind_ts = (int64_t *)ptr;
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- if (tsRand) {
+ if (sourceRand) {
*bind_ts = startTime + getTSRandTail(
stbInfo->timeStampStep, k,
stbInfo->disorderRatio,
@@ -5586,14 +5750,46 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
ptr += bind->buffer_length;
+ int cursor = 0;
for (int i = 0; i < stbInfo->columnCount; i ++) {
bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
- if ( -1 == prepareStmtBindArrayByType(
- bind,
- stbInfo->columns[i].dataType,
- stbInfo->columns[i].dataLen,
- &ptr)) {
- return -1;
+
+ if (sourceRand) {
+ if ( -1 == prepareStmtBindArrayByType(
+ bind,
+ stbInfo->columns[i].dataType,
+ stbInfo->columns[i].dataLen,
+ &ptr,
+ NULL)) {
+ free(bindArray);
+ free(bindBuffer);
+ return -1;
+ }
+ } else {
+ char *restStr = stbInfo->sampleDataBuf + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ memset(bindBuffer, 0, g_args.len_of_binary);
+ strncpy(bindBuffer, restStr, index);
+ cursor += index + 1; // skip ',' too
+
+ if ( -1 == prepareStmtBindArrayByType(
+ bind,
+ stbInfo->columns[i].dataType,
+ stbInfo->columns[i].dataLen,
+ &ptr,
+ bindBuffer)) {
+ free(bindArray);
+ free(bindBuffer);
+ return -1;
+ }
}
}
taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
@@ -5602,11 +5798,16 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
k++;
recordFrom ++;
+
+ if (!sourceRand) {
+ (*pSamplePos) ++;
+ }
if (recordFrom >= insertRows) {
break;
}
}
+ free(bindBuffer);
free(bindArray);
return k;
}
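
When the data source is a sample file instead of rand, the prepareStbStmt hunk above walks stbInfo->sampleDataBuf with a cursor, copies the next comma-delimited field into bindBuffer, and passes it to prepareStmtBindArrayByType as the value argument. A self-contained sketch of that field extraction follows; the names are illustrative, not the structures used in the patch.

#include <stdio.h>
#include <string.h>

/* Hedged sketch: copy the next comma-delimited field starting at row+*cursor
 * into out (capacity outLen) and advance *cursor past the ',' as well,
 * mirroring the "cursor += index + 1" step above. */
static int next_field(const char *row, int *cursor, char *out, int outLen) {
    const char *rest = row + *cursor;
    int len = 0;
    while (rest[len] != '\0' && rest[len] != ',')
        len++;
    int copy = (len < outLen - 1) ? len : outLen - 1;  /* truncate, never overflow */
    memset(out, 0, outLen);
    strncpy(out, rest, copy);
    *cursor += len + 1;
    return copy;
}

int main(void) {
    const char *sampleRow = "10.3,219,0.31";
    char field[64];
    int cursor = 0;
    while (cursor < (int)strlen(sampleRow)) {
        next_field(sampleRow, &cursor, field, (int)sizeof(field));
        printf("bind value: %s\n", field);
    }
    return 0;
}
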
@@ -5799,13 +6000,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if (superTblInfo) {
if (superTblInfo->iface == STMT_IFACE) {
#if STMT_IFACE_ENABLED == 1
- generated = prepareStbStmt(superTblInfo,
+ generated = prepareStbStmt(
+ superTblInfo,
pThreadInfo->stmt,
tableName,
batchPerTbl,
insertRows, i,
startTime,
- pThreadInfo->buffer);
+ &(pThreadInfo->samplePos));
#else
generated = -1;
#endif
@@ -5903,11 +6105,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampMs();
if (recOfBatch == 0) {
- errorPrint("[%d] %s() LN%d try inserting records of batch is %d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- recOfBatch);
- errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n");
- goto free_of_interlace;
+ errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+ errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n",
+ maxSqlLen, batchPerTbl);
+ goto free_of_interlace;
}
int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
@@ -6027,7 +6234,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->stmt,
tableName,
g_args.num_of_RPR,
- insertRows, i, start_time, pstr);
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos));
#else
generated = -1;
#endif
@@ -6393,7 +6601,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
assert(pids != NULL);
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
@@ -6769,7 +6977,11 @@ static int insertTestProcess() {
}
// pretreatement
- prepareSampleData();
+ if (prepareSampleData() != 0) {
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
+ return -1;
+ }
double start;
double end;
@@ -7048,8 +7260,8 @@ static int queryTestProcess() {
if ((nSqlCount > 0) && (nConcurrent > 0)) {
- pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
- infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo));
+ pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+ infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
taos_close(taos);
@@ -7094,8 +7306,8 @@ static int queryTestProcess() {
//==== create sub threads for query from all sub table of the super table
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+ pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
free(infos);
@@ -7221,151 +7433,159 @@ static TAOS_SUB* subscribeImpl(
}
if (tsub == NULL) {
- printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
- return NULL;
+ errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ return NULL;
}
return tsub;
}
static void *superSubscribe(void *sarg) {
- threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[MAX_QUERY_SQL_LENGTH];
- TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
- uint64_t tsubSeq;
-
- if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
- errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
- pThreadInfo->ntables,
- MAX_QUERY_SQL_COUNT);
- exit(-1);
- }
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ char subSqlstr[MAX_QUERY_SQL_LENGTH];
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ uint64_t tsubSeq;
+
+ if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
+ errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
+ pThreadInfo->ntables, MAX_QUERY_SQL_COUNT);
+ exit(-1);
+ }
- if (pThreadInfo->taos == NULL) {
- pThreadInfo->taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- g_queryInfo.dbName,
- g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
- return NULL;
+ pThreadInfo->taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ g_queryInfo.dbName,
+ g_queryInfo.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ }
}
- }
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
- return NULL;
- }
+ return NULL;
+ }
- char topic[32] = {0};
- for (uint64_t i = pThreadInfo->start_table_from;
- i <= pThreadInfo->end_table_to; i++) {
+ char topic[32] = {0};
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ tsubSeq = i - pThreadInfo->start_table_from;
+ verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID,
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to, i);
+ sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
+ i, pThreadInfo->querySeq);
+ memset(subSqlstr, 0, sizeof(subSqlstr));
+ replaceChildTblName(
+ g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
+ subSqlstr, i);
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
- tsubSeq = i - pThreadInfo->start_table_from;
- verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
- __func__, __LINE__,
- pThreadInfo->threadID,
- pThreadInfo->start_table_from,
- pThreadInfo->end_table_to, i);
- sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
- i, pThreadInfo->querySeq);
- memset(subSqlstr, 0, sizeof(subSqlstr));
- replaceChildTblName(
- g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
- subSqlstr, i);
- if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- }
+ verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n",
+ __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
+ tsub[tsubSeq] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlstr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ if (NULL == tsub[tsubSeq]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
- debugPrint("%s() LN%d, [%d] subSqlstr: %s\n",
- __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
- tsub[tsubSeq] = subscribeImpl(
- STABLE_CLASS,
- pThreadInfo, subSqlstr, topic,
- g_queryInfo.superQueryInfo.subscribeRestart,
- g_queryInfo.superQueryInfo.subscribeInterval);
- if (NULL == tsub[tsubSeq]) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
- }
+ // start loop to consume result
+ int consumed[MAX_QUERY_SQL_COUNT];
+ for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) {
+ consumed[i] = 0;
+ }
+ TAOS_RES* res = NULL;
- // start loop to consume result
- int consumed[MAX_QUERY_SQL_COUNT];
- for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) {
- consumed[i] = 0;
- }
- TAOS_RES* res = NULL;
+ uint64_t st = 0, et = 0;
+
+ while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
+ || (g_queryInfo.superQueryInfo.endAfterConsume >
+ consumed[pThreadInfo->end_table_to
+ - pThreadInfo->start_table_from])) {
- uint64_t st = 0, et = 0;
+ verbosePrint("super endAfterConsume: %d, consumed: %d\n",
+ g_queryInfo.superQueryInfo.endAfterConsume,
+ consumed[pThreadInfo->end_table_to
+ - pThreadInfo->start_table_from]);
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ tsubSeq = i - pThreadInfo->start_table_from;
+ if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
+ continue;
+ }
- while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
- || (g_queryInfo.superQueryInfo.endAfterConsume <
- consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) {
+ st = taosGetTimestampMs();
+ performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
+ res = taos_consume(tsub[tsubSeq]);
+ et = taosGetTimestampMs();
+ performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
+
+ if (res) {
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ fetchResult(res, pThreadInfo);
+ }
+ consumed[tsubSeq] ++;
+
+ if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1)
+ && (consumed[tsubSeq] >=
+ g_queryInfo.superQueryInfo.resubAfterConsume)) {
+ verbosePrint("%s() LN%d, keepProgress:%d, resub super table query: %"PRIu64"\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ taos_unsubscribe(tsub[tsubSeq],
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ consumed[tsubSeq]= 0;
+ tsub[tsubSeq] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlstr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval
+ );
+ if (NULL == tsub[tsubSeq]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
+ }
+ }
+ }
+ verbosePrint("%s() LN%d, super endAfterConsume: %d, consumed: %d\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.endAfterConsume,
+ consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]);
+ taos_free_result(res);
for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
- tsubSeq = i - pThreadInfo->start_table_from;
- if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
- continue;
- }
-
- st = taosGetTimestampMs();
- performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
- res = taos_consume(tsub[tsubSeq]);
- et = taosGetTimestampMs();
- performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
-
- if (res) {
- if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- fetchResult(res, pThreadInfo);
- }
- consumed[tsubSeq] ++;
-
- if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1)
- && (consumed[tsubSeq] >=
- g_queryInfo.superQueryInfo.resubAfterConsume)) {
- printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
- g_queryInfo.superQueryInfo.subscribeKeepProgress,
- pThreadInfo->querySeq);
- taos_unsubscribe(tsub[tsubSeq],
- g_queryInfo.superQueryInfo.subscribeKeepProgress);
- consumed[tsubSeq]= 0;
- tsub[tsubSeq] = subscribeImpl(
- STABLE_CLASS,
- pThreadInfo, subSqlstr, topic,
- g_queryInfo.superQueryInfo.subscribeRestart,
- g_queryInfo.superQueryInfo.subscribeInterval
- );
- if (NULL == tsub[tsubSeq]) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
- }
- }
+ tsubSeq = i - pThreadInfo->start_table_from;
+ taos_unsubscribe(tsub[tsubSeq], 0);
}
- }
- taos_free_result(res);
-
- for (uint64_t i = pThreadInfo->start_table_from;
- i <= pThreadInfo->end_table_to; i++) {
- tsubSeq = i - pThreadInfo->start_table_from;
- taos_unsubscribe(tsub[tsubSeq], 0);
- }
- taos_close(pThreadInfo->taos);
- return NULL;
+ taos_close(pThreadInfo->taos);
+ return NULL;
}
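
The re-indented superSubscribe above keeps a per-subscription consumed[] counter: the loop runs until consumed passes endAfterConsume (or forever when it is -1), and once resubAfterConsume results have been read it unsubscribes, resets the counter, and subscribes again. A stripped-down sketch of that counter logic, with stub calls standing in for the TDengine subscription API:

#include <stdio.h>
#include <stdbool.h>

/* Stubs standing in for taos_consume()/taos_unsubscribe()/subscribeImpl(). */
static bool consume_once(void)  { return true; }
static void resubscribe(int id) { printf("resubscribe #%d\n", id); }

static void consume_loop(int endAfterConsume, int resubAfterConsume) {
    int consumed = 0;
    int total = 0;                       /* sketch-only guard so the demo ends */
    while ((endAfterConsume == -1) || (endAfterConsume > consumed)) {
        if (!consume_once())
            continue;
        consumed++;
        total++;
        if ((resubAfterConsume != -1) && (consumed >= resubAfterConsume)) {
            consumed = 0;                /* counter resets after every resubscribe */
            resubscribe(0);
        }
        if (total >= 10)
            break;                       /* keep the sketch finite */
    }
    printf("done, total consumed: %d\n", total);
}

int main(void) {
    consume_loop(-1, 4);                 /* endless subscription, resub every 4 */
    return 0;
}
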
static void *specifiedSubscribe(void *sarg) {
@@ -7419,8 +7639,13 @@ static void *specifiedSubscribe(void *sarg) {
|| (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] <
g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) {
+ printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n",
+ pThreadInfo->threadID,
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID],
+ pThreadInfo->querySeq,
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]);
if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
- continue;
+ continue;
}
g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
@@ -7462,7 +7687,6 @@ static void *specifiedSubscribe(void *sarg) {
}
}
taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
- taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
taos_close(pThreadInfo->taos);
return NULL;
@@ -7516,11 +7740,13 @@ static int subscribeTestProcess() {
exit(-1);
}
- pids = malloc(
+ pids = calloc(
+ 1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(pthread_t));
- infos = malloc(
+ infos = calloc(
+ 1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
@@ -7549,11 +7775,13 @@ static int subscribeTestProcess() {
} else {
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfStable = malloc(
+ pidsOfStable = calloc(
+ 1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(pthread_t));
- infosOfStable = malloc(
+ infosOfStable = calloc(
+ 1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
@@ -7717,7 +7945,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
g_Dbs.db[0].superTbls[0].columnCount = 0;
- for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
+ for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
if (data_type[i] == NULL) {
break;
}
@@ -7870,7 +8098,7 @@ static void queryResult() {
// query data
pthread_t read_id;
- threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+ threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
assert(pThreadInfo);
pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 58897b89e95743c802755c0476f3b2843a244a59..51f4748eab462c8e883e83cd5923f38dd7fb9b5a 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 05c6b1efbb7990260d5eaef454d4d3a339ec5268..98521d842064c8fe1b07478810c15d870ceaadf5 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -25,17 +25,22 @@
#include "tsclient.h"
#include "tsdb.h"
#include "tutil.h"
+#include
-#define COMMAND_SIZE 65536
+#define TSDB_SUPPORT_NANOSECOND 1
+
+#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
+#define COMMAND_SIZE 65536
+#define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql"
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
-int converStringToReadable(char *str, int size, char *buf, int bufsize);
-int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-void taosDumpCharset(FILE *fp);
-void taosLoadFileCharset(FILE *fp, char *fcharset);
+static int converStringToReadable(char *str, int size, char *buf, int bufsize);
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
+static void taosDumpCharset(FILE *fp);
+static void taosLoadFileCharset(FILE *fp, char *fcharset);
typedef struct {
short bytes;
@@ -60,120 +65,123 @@ typedef struct {
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
- TSDB_SHOW_DB_NAME_INDEX,
- TSDB_SHOW_DB_CREATED_TIME_INDEX,
- TSDB_SHOW_DB_NTABLES_INDEX,
- TSDB_SHOW_DB_VGROUPS_INDEX,
- TSDB_SHOW_DB_REPLICA_INDEX,
- TSDB_SHOW_DB_QUORUM_INDEX,
- TSDB_SHOW_DB_DAYS_INDEX,
- TSDB_SHOW_DB_KEEP_INDEX,
- TSDB_SHOW_DB_CACHE_INDEX,
- TSDB_SHOW_DB_BLOCKS_INDEX,
- TSDB_SHOW_DB_MINROWS_INDEX,
- TSDB_SHOW_DB_MAXROWS_INDEX,
- TSDB_SHOW_DB_WALLEVEL_INDEX,
- TSDB_SHOW_DB_FSYNC_INDEX,
- TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_CACHELAST_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
- TSDB_SHOW_DB_UPDATE_INDEX,
- TSDB_SHOW_DB_STATUS_INDEX,
- TSDB_MAX_SHOW_DB
+ TSDB_SHOW_DB_NAME_INDEX,
+ TSDB_SHOW_DB_CREATED_TIME_INDEX,
+ TSDB_SHOW_DB_NTABLES_INDEX,
+ TSDB_SHOW_DB_VGROUPS_INDEX,
+ TSDB_SHOW_DB_REPLICA_INDEX,
+ TSDB_SHOW_DB_QUORUM_INDEX,
+ TSDB_SHOW_DB_DAYS_INDEX,
+ TSDB_SHOW_DB_KEEP_INDEX,
+ TSDB_SHOW_DB_CACHE_INDEX,
+ TSDB_SHOW_DB_BLOCKS_INDEX,
+ TSDB_SHOW_DB_MINROWS_INDEX,
+ TSDB_SHOW_DB_MAXROWS_INDEX,
+ TSDB_SHOW_DB_WALLEVEL_INDEX,
+ TSDB_SHOW_DB_FSYNC_INDEX,
+ TSDB_SHOW_DB_COMP_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_UPDATE_INDEX,
+ TSDB_SHOW_DB_STATUS_INDEX,
+ TSDB_MAX_SHOW_DB
};
// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
enum _show_tables_index {
- TSDB_SHOW_TABLES_NAME_INDEX,
- TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
- TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
- TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
- TSDB_MAX_SHOW_TABLES
+ TSDB_SHOW_TABLES_NAME_INDEX,
+ TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
+ TSDB_SHOW_TABLES_COLUMNS_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_TID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_MAX_SHOW_TABLES
};
// ---------------------------------- DESCRIBE METRIC CONFIGURE ------------------------------
enum _describe_table_index {
- TSDB_DESCRIBE_METRIC_FIELD_INDEX,
- TSDB_DESCRIBE_METRIC_TYPE_INDEX,
- TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
- TSDB_DESCRIBE_METRIC_NOTE_INDEX,
- TSDB_MAX_DESCRIBE_METRIC
+ TSDB_DESCRIBE_METRIC_FIELD_INDEX,
+ TSDB_DESCRIBE_METRIC_TYPE_INDEX,
+ TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
+ TSDB_DESCRIBE_METRIC_NOTE_INDEX,
+ TSDB_MAX_DESCRIBE_METRIC
};
#define COL_NOTE_LEN 128
typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
- char type[16];
- int length;
- char note[COL_NOTE_LEN];
+ char field[TSDB_COL_NAME_LEN + 1];
+ char type[16];
+ int length;
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- SColDes cols[];
+ char name[TSDB_TABLE_NAME_LEN];
+ SColDes cols[];
} STableDef;
extern char version[];
+#define DB_PRECISION_LEN 8
+#define DB_STATUS_LEN 16
+
typedef struct {
- char name[TSDB_DB_NAME_LEN];
- char create_time[32];
- int32_t ntables;
- int32_t vgroups;
- int16_t replica;
- int16_t quorum;
- int16_t days;
- char keeplist[32];
- //int16_t daysToKeep;
- //int16_t daysToKeep1;
- //int16_t daysToKeep2;
- int32_t cache; //MB
- int32_t blocks;
- int32_t minrows;
- int32_t maxrows;
- int8_t wallevel;
- int32_t fsync;
- int8_t comp;
- int8_t cachelast;
- char precision[8]; // time resolution
- int8_t update;
- char status[16];
+ char name[TSDB_DB_NAME_LEN];
+ char create_time[32];
+ int32_t ntables;
+ int32_t vgroups;
+ int16_t replica;
+ int16_t quorum;
+ int16_t days;
+ char keeplist[32];
+ //int16_t daysToKeep;
+ //int16_t daysToKeep1;
+ //int16_t daysToKeep2;
+ int32_t cache; //MB
+ int32_t blocks;
+ int32_t minrows;
+ int32_t maxrows;
+ int8_t wallevel;
+ int32_t fsync;
+ int8_t comp;
+ int8_t cachelast;
+ char precision[DB_PRECISION_LEN]; // time resolution
+ int8_t update;
+ char status[DB_STATUS_LEN];
} SDbInfo;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- char metric[TSDB_TABLE_NAME_LEN];
+ char name[TSDB_TABLE_NAME_LEN];
+ char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
- bool isMetric;
- STableRecord tableRecord;
+ bool isMetric;
+ STableRecord tableRecord;
} STableRecordInfo;
typedef struct {
- pthread_t threadID;
- int32_t threadIndex;
- int32_t totalThreads;
- char dbName[TSDB_DB_NAME_LEN];
- void *taosCon;
- int64_t rowsOfDumpOut;
- int64_t tablesOfDumpOut;
+ pthread_t threadID;
+ int32_t threadIndex;
+ int32_t totalThreads;
+ char dbName[TSDB_DB_NAME_LEN];
+ void *taosCon;
+ int64_t rowsOfDumpOut;
+ int64_t tablesOfDumpOut;
} SThreadParaObj;
typedef struct {
- int64_t totalRowsOfDumpOut;
- int64_t totalChildTblsOfDumpOut;
- int32_t totalSuperTblsOfDumpOut;
- int32_t totalDatabasesOfDumpOut;
+ int64_t totalRowsOfDumpOut;
+ int64_t totalChildTblsOfDumpOut;
+ int32_t totalSuperTblsOfDumpOut;
+ int32_t totalDatabasesOfDumpOut;
} resultStatistics;
-static int64_t totalDumpOutRows = 0;
+static int64_t g_totalDumpOutRows = 0;
-SDbInfo **dbInfos = NULL;
+SDbInfo **g_dbInfos = NULL;
const char *argp_program_version = version;
const char *argp_program_bug_address = "";
@@ -194,1468 +202,1657 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat
/* The options we understand. */
static struct argp_option options[] = {
- // connection option
- {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
- {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
- #ifdef _TD_POWER_
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
- #else
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
- #endif
- {"port", 'P', "PORT", 0, "Port to connect", 0},
- {"cversion", 'v', "CVERION", 0, "client version", 0},
- {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
- // input/output file
- {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
- {"inpath", 'i', "INPATH", 0, "Input file path.", 1},
- {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
- #ifdef _TD_POWER_
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
- #else
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
- #endif
- {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
- // dump unit options
- {"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'D', 0, 0, "Dump assigned databases", 2},
- // dump format options
- {"schemaonly", 's', 0, 0, "Only dump schema.", 3},
- {"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
- {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
- {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
- {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
- {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
- {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
- {"debug", 'g', 0, 0, "Print debug info.", 1},
- {"verbose", 'v', 0, 0, "Print verbose debug info.", 1},
- {0}};
+ // connection option
+ {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
+ {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
+#ifdef _TD_POWER_
+ {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
+#else
+ {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
+#endif
+ {"port", 'P', "PORT", 0, "Port to connect", 0},
+ {"cversion", 'v', "CVERION", 0, "client version", 0},
+ {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
+ // input/output file
+ {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
+ {"inpath", 'i', "INPATH", 0, "Input file path.", 1},
+ {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
+#ifdef _TD_POWER_
+ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
+#else
+ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
+#endif
+ {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
+ // dump unit options
+ {"all-databases", 'A', 0, 0, "Dump all databases.", 2},
+ {"databases", 'D', 0, 0, "Dump assigned databases", 2},
+ {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2},
+ // dump format options
+ {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
+ {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
+ {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
+ {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
+ {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
+#if TSDB_SUPPORT_NANOSECOND == 1
+ {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+#else
+ {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+#endif
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
+ {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
+ {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
+ {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
+ {"debug", 'g', 0, 0, "Print debug info.", 8},
+ {"verbose", 'b', 0, 0, "Print verbose debug info.", 9},
+ {"performanceprint", 'm', 0, 0, "Print performance debug info.", 10},
+ {0}
+};
/* Used by main to communicate with parse_opt. */
typedef struct arguments {
- // connection option
- char *host;
- char *user;
- char *password;
- uint16_t port;
- char cversion[12];
- uint16_t mysqlFlag;
- // output file
- char outpath[TSDB_FILENAME_LEN+1];
- char inpath[TSDB_FILENAME_LEN+1];
- // result file
- char *resultFile;
- char *encode;
- // dump unit option
- bool all_databases;
- bool databases;
- // dump format option
- bool schemaonly;
- bool with_property;
- int64_t start_time;
- int64_t end_time;
- int32_t data_batch;
- int32_t max_sql_len;
- int32_t table_batch; // num of table which will be dump into one output file.
- bool allow_sys;
- // other options
- int32_t thread_num;
- int abort;
- char **arg_list;
- int arg_list_len;
- bool isDumpIn;
- bool debug_print;
- bool verbose_print;
- bool performance_print;
-} SArguments;
-
-/* Parse a single option. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- /* Get the input argument from argp_parse, which we
- know is a pointer to our arguments structure. */
- struct arguments *arguments = state->input;
- wordexp_t full_path;
-
- switch (key) {
// connection option
- case 'a':
- arguments->allow_sys = true;
- break;
- case 'h':
- arguments->host = arg;
- break;
- case 'u':
- arguments->user = arg;
- break;
- case 'p':
- arguments->password = arg;
- break;
- case 'P':
- arguments->port = atoi(arg);
- break;
- case 'q':
- arguments->mysqlFlag = atoi(arg);
- break;
- case 'v':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid client vesion %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->cversion, full_path.we_wordv[0], 11);
- wordfree(&full_path);
- break;
- // output file path
- case 'o':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'g':
- arguments->debug_print = true;
- break;
- case 'i':
- arguments->isDumpIn = true;
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'r':
- arguments->resultFile = arg;
- break;
- case 'c':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'e':
- arguments->encode = arg;
- break;
+ char *host;
+ char *user;
+ char *password;
+ uint16_t port;
+ char cversion[12];
+ uint16_t mysqlFlag;
+ // output file
+ char outpath[MAX_FILE_NAME_LEN];
+ char inpath[MAX_FILE_NAME_LEN];
+ // result file
+ char *resultFile;
+ char *encode;
// dump unit option
- case 'A':
- arguments->all_databases = true;
- break;
- case 'D':
- arguments->databases = true;
- break;
+ bool all_databases;
+ bool databases;
// dump format option
- case 's':
- arguments->schemaonly = true;
- break;
- case 'N':
- arguments->with_property = false;
- break;
- case 'S':
- // parse time here.
- arguments->start_time = atol(arg);
- break;
- case 'E':
- arguments->end_time = atol(arg);
- break;
- case 'B':
- arguments->data_batch = atoi(arg);
- if (arguments->data_batch >= INT16_MAX) {
- arguments->data_batch = INT16_MAX - 1;
- }
- break;
- case 'L':
- {
- int32_t len = atoi(arg);
- if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
- len = TSDB_MAX_ALLOWED_SQL_LEN;
- } else if (len < TSDB_MAX_SQL_LEN) {
- len = TSDB_MAX_SQL_LEN;
- }
- arguments->max_sql_len = len;
- break;
- }
- case 't':
- arguments->table_batch = atoi(arg);
- break;
- case 'T':
- arguments->thread_num = atoi(arg);
- break;
- case OPT_ABORT:
- arguments->abort = 1;
- break;
- case ARGP_KEY_ARG:
- arguments->arg_list = &state->argv[state->next - 1];
- arguments->arg_list_len = state->argc - state->next + 1;
- state->next = state->argc;
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
+ bool schemaonly;
+ bool with_property;
+ bool avro;
+ int64_t start_time;
+ int64_t end_time;
+ char precision[8];
+ int32_t data_batch;
+ int32_t max_sql_len;
+ int32_t table_batch; // num of table which will be dump into one output file.
+ bool allow_sys;
+ // other options
+ int32_t thread_num;
+ int abort;
+ char **arg_list;
+ int arg_list_len;
+ bool isDumpIn;
+ bool debug_print;
+ bool verbose_print;
+ bool performance_print;
+} SArguments;
/* Our argp parser. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state);
+
static struct argp argp = {options, parse_opt, args_doc, doc};
static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-static int taosDumpOut(struct arguments *arguments);
-static int taosDumpIn(struct arguments *arguments);
-static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
-static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
-static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
-static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
-static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
-static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
-static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
+static int taosDumpOut();
+static int taosDumpIn();
+static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
+ FILE *fp);
+static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon);
+static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon,
+ char* dbName);
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName);
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
+ int numOfCols, FILE *fp, char* dbName);
+static int32_t taosDumpTable(char *table, char *metric,
+ FILE *fp, TAOS* taosCon, char* dbName);
+static int taosDumpTableData(FILE *fp, char *tbName,
+ TAOS* taosCon, char* dbName,
+ char *jsonAvroSchema);
static int taosCheckParam(struct arguments *arguments);
static void taosFreeDbInfos();
-static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
+static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName);
struct arguments g_args = {
- // connection option
- NULL,
- "root",
- #ifdef _TD_POWER_
- "powerdb",
- #else
- "taosdata",
- #endif
- 0,
- "",
- 0,
- // outpath and inpath
- "",
- "",
- "./dump_result.txt",
- NULL,
- // dump unit option
- false,
- false,
- // dump format option
- false, // schemeonly
- true, // with_property
- 0,
- INT64_MAX,
- 1,
- TSDB_MAX_SQL_LEN,
- 1,
- false,
- // other options
- 5,
- 0,
- NULL,
- 0,
- false,
- false, // debug_print
- false, // verbose_print
- false // performance_print
+ // connection option
+ NULL,
+ "root",
+#ifdef _TD_POWER_
+ "powerdb",
+#else
+ "taosdata",
+#endif
+ 0,
+ "",
+ 0,
+ // outpath and inpath
+ "",
+ "",
+ "./dump_result.txt",
+ NULL,
+ // dump unit option
+ false,
+ false,
+ // dump format option
+ false, // schemeonly
+ true, // with_property
+ false, // avro format
+ -INT64_MAX, // start_time
+ INT64_MAX, // end_time
+ "ms", // precision
+ 1, // data_batch
+ TSDB_MAX_SQL_LEN, // max_sql_len
+ 1, // table_batch
+ false, // allow_sys
+ // other options
+ 5, // thread_num
+ 0, // abort
+ NULL, // arg_list
+ 0, // arg_list_len
+ false, // isDumpIn
+ false, // debug_print
+ false, // verbose_print
+ false // performance_print
};
+/* Parse a single option. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state) {
+ /* Get the input argument from argp_parse, which we
+ know is a pointer to our arguments structure. */
+ wordexp_t full_path;
+
+ switch (key) {
+ // connection option
+ case 'a':
+ g_args.allow_sys = true;
+ break;
+ case 'h':
+ g_args.host = arg;
+ break;
+ case 'u':
+ g_args.user = arg;
+ break;
+ case 'p':
+ g_args.password = arg;
+ break;
+ case 'P':
+ g_args.port = atoi(arg);
+ break;
+ case 'q':
+ g_args.mysqlFlag = atoi(arg);
+ break;
+ case 'v':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid client vesion %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.cversion, full_path.we_wordv[0], 11);
+ wordfree(&full_path);
+ break;
+ // output file path
+ case 'o':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.outpath, full_path.we_wordv[0],
+ MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'g':
+ g_args.debug_print = true;
+ break;
+ case 'i':
+ g_args.isDumpIn = true;
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.inpath, full_path.we_wordv[0],
+ MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'r':
+ g_args.resultFile = arg;
+ break;
+ case 'c':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'e':
+ g_args.encode = arg;
+ break;
+ // dump unit option
+ case 'A':
+ g_args.all_databases = true;
+ break;
+ case 'D':
+ g_args.databases = true;
+ break;
+ // dump format option
+ case 's':
+ g_args.schemaonly = true;
+ break;
+ case 'N':
+ g_args.with_property = false;
+ break;
+ case 'V':
+ g_args.avro = true;
+ break;
+ case 'S':
+ // parse time here.
+ g_args.start_time = atol(arg);
+ break;
+ case 'E':
+ g_args.end_time = atol(arg);
+ break;
+ case 'B':
+ g_args.data_batch = atoi(arg);
+ if (g_args.data_batch > MAX_RECORDS_PER_REQ) {
+ g_args.data_batch = MAX_RECORDS_PER_REQ;
+ }
+ break;
+ case 'L':
+ {
+ int32_t len = atoi(arg);
+ if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+ len = TSDB_MAX_ALLOWED_SQL_LEN;
+ } else if (len < TSDB_MAX_SQL_LEN) {
+ len = TSDB_MAX_SQL_LEN;
+ }
+ g_args.max_sql_len = len;
+ break;
+ }
+ case 't':
+ g_args.table_batch = atoi(arg);
+ break;
+ case 'T':
+ g_args.thread_num = atoi(arg);
+ break;
+ case OPT_ABORT:
+ g_args.abort = 1;
+ break;
+ case ARGP_KEY_ARG:
+ g_args.arg_list = &state->argv[state->next - 1];
+ g_args.arg_list_len = state->argc - state->next + 1;
+ state->next = state->argc;
+ break;
+
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
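parse_opt above expands every user-supplied path through wordexp() before copying it into a fixed-size buffer, and the refactor writes straight into the global g_args instead of a state->input pointer. A small standalone sketch of that expansion step follows; expand_path is an illustrative name, not a function from the patch.

#include <stdio.h>
#include <string.h>
#include <wordexp.h>

/* Hedged sketch: expand ~ and $VARS in a user-supplied path, then copy the
 * result into a fixed-size buffer, as the -o/-i/-c handling above does. */
static int expand_path(const char *arg, char *out, size_t outLen) {
    wordexp_t full_path;
    if (wordexp(arg, &full_path, 0) != 0) {
        fprintf(stderr, "Invalid path %s\n", arg);
        return -1;
    }
    strncpy(out, full_path.we_wordv[0], outLen - 1);
    out[outLen - 1] = '\0';
    wordfree(&full_path);
    return 0;
}

int main(void) {
    char outpath[256];
    if (0 == expand_path("$HOME/dump", outpath, sizeof(outpath)))
        printf("outpath: %s\n", outpath);
    return 0;
}
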
static int queryDbImpl(TAOS *taos, char *command) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
+ int i;
+ TAOS_RES *res = NULL;
+ int32_t code = -1;
+
+ for (i = 0; i < 5; i++) {
+ if (NULL != res) {
+ taos_free_result(res);
+ res = NULL;
+ }
- for (i = 0; i < 5; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
+ res = taos_query(taos, command);
+ code = taos_errno(res);
+ if (0 == code) {
+ break;
+ }
}
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
+ if (code != 0) {
+ errorPrint("Failed to run <%s>, reason: %s\n", command, taos_errstr(res));
+ taos_free_result(res);
+ //taos_close(taos);
+ return -1;
}
- }
- if (code != 0) {
- fprintf(stderr, "Failed to run <%s>, reason: %s\n", command, taos_errstr(res));
taos_free_result(res);
- //taos_close(taos);
- return -1;
- }
+ return 0;
+}
- taos_free_result(res);
- return 0;
+static void parse_precision_first(
+ int argc, char *argv[], SArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-C") == 0) {
+ if (NULL == argv[i+1]) {
+ errorPrint("%s need a valid value following!\n", argv[i]);
+ exit(-1);
+ }
+ char *tmp = strdup(argv[i+1]);
+ if (tmp == NULL) {
+ errorPrint("%s() LN%d, strdup() cannot allocate memory\n",
+ __func__, __LINE__);
+ exit(-1);
+ }
+ if ((0 != strncasecmp(tmp, "ms", strlen("ms")))
+ && (0 != strncasecmp(tmp, "us", strlen("us")))
+#if TSDB_SUPPORT_NANOSECOND == 1
+ && (0 != strncasecmp(tmp, "ns", strlen("ns")))
+#endif
+ ) {
+ //
+ errorPrint("input precision: %s is invalid value\n", tmp);
+ free(tmp);
+ exit(-1);
+ }
+ strncpy(g_args.precision, tmp,
+ min(DB_PRECISION_LEN - 1, strlen(tmp)));
+ free(tmp);
+ }
+ }
}
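
parse_precision_first has to run before argp so that parse_timestamp already knows whether the -S/-E values are ms, us, or ns epochs. A tiny sketch of the check it performs; valid_precision and SUPPORT_NANOSECOND are illustrative stand-ins.

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define SUPPORT_NANOSECOND 1   /* stands in for TSDB_SUPPORT_NANOSECOND */

/* Hedged sketch of the -C argument validation in parse_precision_first. */
static int valid_precision(const char *p) {
    return (0 == strncasecmp(p, "ms", strlen("ms")))
        || (0 == strncasecmp(p, "us", strlen("us")))
#if SUPPORT_NANOSECOND == 1
        || (0 == strncasecmp(p, "ns", strlen("ns")))
#endif
        ;
}

int main(void) {
    const char *args[] = { "ms", "ns", "sec" };
    for (int i = 0; i < 3; i++)
        printf("%s -> %s\n", args[i], valid_precision(args[i]) ? "ok" : "invalid");
    return 0;
}
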
-static void parse_args(int argc, char *argv[], SArguments *arguments) {
- for (int i = 1; i < argc; i++) {
- if ((strcmp(argv[i], "-S") == 0)
- || (strcmp(argv[i], "-E") == 0)) {
- if (argv[i+1]) {
- char *tmp = strdup(argv[++i]);
-
- if (tmp) {
- int64_t tmpEpoch;
- if (strchr(tmp, ':') && strchr(tmp, '-')) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
- fprintf(stderr, "Input end time error!\n");
- free(tmp);
- return;
+static void parse_timestamp(
+ int argc, char *argv[], SArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if ((strcmp(argv[i], "-S") == 0)
+ || (strcmp(argv[i], "-E") == 0)) {
+ if (NULL == argv[i+1]) {
+ errorPrint("%s need a valid value following!\n", argv[i]);
+ exit(-1);
+ }
+ char *tmp = strdup(argv[i+1]);
+ if (NULL == tmp) {
+ errorPrint("%s() LN%d, strdup() cannot allocate memory\n",
+ __func__, __LINE__);
+ exit(-1);
}
- } else {
- tmpEpoch = atoll(tmp);
- }
- sprintf(argv[i], "%"PRId64"", tmpEpoch);
- debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
- __func__, __LINE__, tmp, i, argv[i]);
+ int64_t tmpEpoch;
+ if (strchr(tmp, ':') && strchr(tmp, '-')) {
+ int32_t timePrec;
+ if (0 == strncasecmp(arguments->precision,
+ "ms", strlen("ms"))) {
+ timePrec = TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(arguments->precision,
+ "us", strlen("us"))) {
+ timePrec = TSDB_TIME_PRECISION_MICRO;
+#if TSDB_SUPPORT_NANOSECOND == 1
+ } else if (0 == strncasecmp(arguments->precision,
+ "ns", strlen("ns"))) {
+ timePrec = TSDB_TIME_PRECISION_NANO;
+#endif
+ } else {
+ errorPrint("Invalid time precision: %s",
+ arguments->precision);
+ free(tmp);
+ return;
+ }
+
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ tmp, &tmpEpoch, strlen(tmp),
+ timePrec, 0)) {
+ errorPrint("Input %s, end time error!\n", tmp);
+ free(tmp);
+ return;
+ }
+ } else {
+ tmpEpoch = atoll(tmp);
+ }
- free(tmp);
- } else {
- errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
- exit(-1);
+ sprintf(argv[i], "%"PRId64"", tmpEpoch);
+ debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
+ __func__, __LINE__, tmp, i, argv[i]);
+ free(tmp);
}
- } else {
- errorPrint("%s need a valid value following!\n", argv[i]);
- exit(-1);
- }
- } else if (strcmp(argv[i], "-g") == 0) {
- arguments->debug_print = true;
}
- }
}
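
parse_timestamp above classifies a -S/-E argument by shape: anything containing both ':' and '-' is treated as a calendar timestamp and handed to taosParseTime() with the precision chosen earlier, while everything else is read as a raw epoch number and written back into argv. A compact sketch of that classification; the strtoll fallback stands in for taosParseTime(), which belongs to TDengine.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

/* Hedged sketch: decide whether a -S/-E argument is a calendar timestamp or
 * a plain epoch value, as the patch does with strchr(':') && strchr('-'). */
static int64_t parse_time_arg(const char *arg) {
    if (strchr(arg, ':') && strchr(arg, '-')) {
        /* Calendar string: the real code calls taosParseTime() here. */
        printf("calendar form, would call taosParseTime(\"%s\", ...)\n", arg);
        return 0;
    }
    return strtoll(arg, NULL, 10);   /* plain epoch, like atoll(tmp) above */
}

int main(void) {
    printf("%" PRId64 "\n", parse_time_arg("1617181723000"));
    parse_time_arg("2017-10-01 00:00:00.000+0800");
    return 0;
}
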
int main(int argc, char *argv[]) {
- /* Parse our arguments; every option seen by parse_opt will be
- reflected in arguments. */
- if (argc > 2)
- parse_args(argc, argv, &g_args);
+ int ret = 0;
+ /* Parse our arguments; every option seen by parse_opt will be
+ reflected in arguments. */
+ if (argc > 2) {
+ parse_precision_first(argc, argv, &g_args);
+ parse_timestamp(argc, argv, &g_args);
+ }
- argp_parse(&argp, argc, argv, 0, 0, &g_args);
+ argp_parse(&argp, argc, argv, 0, 0, &g_args);
- if (g_args.abort) {
- #ifndef _ALPINE
- error(10, 0, "ABORTED");
- #else
- abort();
- #endif
- }
+ if (g_args.abort) {
+#ifndef _ALPINE
+ error(10, 0, "ABORTED");
+#else
+ abort();
+#endif
+ }
- printf("====== arguments config ======\n");
- {
- printf("host: %s\n", g_args.host);
- printf("user: %s\n", g_args.user);
- printf("password: %s\n", g_args.password);
- printf("port: %u\n", g_args.port);
- printf("cversion: %s\n", g_args.cversion);
- printf("mysqlFlag: %d\n", g_args.mysqlFlag);
- printf("outpath: %s\n", g_args.outpath);
- printf("inpath: %s\n", g_args.inpath);
- printf("resultFile: %s\n", g_args.resultFile);
- printf("encode: %s\n", g_args.encode);
- printf("all_databases: %d\n", g_args.all_databases);
- printf("databases: %d\n", g_args.databases);
- printf("schemaonly: %d\n", g_args.schemaonly);
- printf("with_property: %d\n", g_args.with_property);
- printf("start_time: %" PRId64 "\n", g_args.start_time);
- printf("end_time: %" PRId64 "\n", g_args.end_time);
- printf("data_batch: %d\n", g_args.data_batch);
- printf("max_sql_len: %d\n", g_args.max_sql_len);
- printf("table_batch: %d\n", g_args.table_batch);
- printf("thread_num: %d\n", g_args.thread_num);
- printf("allow_sys: %d\n", g_args.allow_sys);
- printf("abort: %d\n", g_args.abort);
- printf("isDumpIn: %d\n", g_args.isDumpIn);
- printf("arg_list_len: %d\n", g_args.arg_list_len);
- printf("debug_print: %d\n", g_args.debug_print);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ printf("====== arguments config ======\n");
+ {
+ printf("host: %s\n", g_args.host);
+ printf("user: %s\n", g_args.user);
+ printf("password: %s\n", g_args.password);
+ printf("port: %u\n", g_args.port);
+ printf("cversion: %s\n", g_args.cversion);
+ printf("mysqlFlag: %d\n", g_args.mysqlFlag);
+ printf("outpath: %s\n", g_args.outpath);
+ printf("inpath: %s\n", g_args.inpath);
+ printf("resultFile: %s\n", g_args.resultFile);
+ printf("encode: %s\n", g_args.encode);
+ printf("all_databases: %s\n", g_args.all_databases?"true":"false");
+ printf("databases: %d\n", g_args.databases);
+ printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
+ printf("with_property: %s\n", g_args.with_property?"true":"false");
+ printf("avro format: %s\n", g_args.avro?"true":"false");
+ printf("start_time: %" PRId64 "\n", g_args.start_time);
+ printf("end_time: %" PRId64 "\n", g_args.end_time);
+ printf("precision: %s\n", g_args.precision);
+ printf("data_batch: %d\n", g_args.data_batch);
+ printf("max_sql_len: %d\n", g_args.max_sql_len);
+ printf("table_batch: %d\n", g_args.table_batch);
+ printf("thread_num: %d\n", g_args.thread_num);
+ printf("allow_sys: %d\n", g_args.allow_sys);
+ printf("abort: %d\n", g_args.abort);
+ printf("isDumpIn: %d\n", g_args.isDumpIn);
+ printf("arg_list_len: %d\n", g_args.arg_list_len);
+ printf("debug_print: %d\n", g_args.debug_print);
+
+ for (int32_t i = 0; i < g_args.arg_list_len; i++) {
+ printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ }
}
- }
- printf("==============================\n");
+ printf("==============================\n");
- if (g_args.cversion[0] != 0){
- tstrncpy(version, g_args.cversion, 11);
- }
+ if (g_args.cversion[0] != 0){
+ tstrncpy(version, g_args.cversion, 11);
+ }
- if (taosCheckParam(&g_args) < 0) {
- exit(EXIT_FAILURE);
- }
+ if (taosCheckParam(&g_args) < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ g_fpOfResult = fopen(g_args.resultFile, "a");
+ if (NULL == g_fpOfResult) {
+ errorPrint("Failed to open %s for save result\n", g_args.resultFile);
+ exit(-1);
+ };
- g_fpOfResult = fopen(g_args.resultFile, "a");
- if (NULL == g_fpOfResult) {
- fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
- return 1;
- };
-
- fprintf(g_fpOfResult, "#############################################################################\n");
- fprintf(g_fpOfResult, "============================== arguments config =============================\n");
- {
- fprintf(g_fpOfResult, "host: %s\n", g_args.host);
- fprintf(g_fpOfResult, "user: %s\n", g_args.user);
- fprintf(g_fpOfResult, "password: %s\n", g_args.password);
- fprintf(g_fpOfResult, "port: %u\n", g_args.port);
- fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
- fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
- fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
- fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
- fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
- fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
- fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
- fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
- fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
- fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
- fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
- fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
- fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
- fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
- fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
- fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
- fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
- fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
- fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
- fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ fprintf(g_fpOfResult, "#############################################################################\n");
+ fprintf(g_fpOfResult, "============================== arguments config =============================\n");
+ {
+ fprintf(g_fpOfResult, "host: %s\n", g_args.host);
+ fprintf(g_fpOfResult, "user: %s\n", g_args.user);
+ fprintf(g_fpOfResult, "password: %s\n", g_args.password);
+ fprintf(g_fpOfResult, "port: %u\n", g_args.port);
+ fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
+ fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
+ fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
+ fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
+ fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
+ fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
+ fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false");
+ fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
+ fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
+ fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
+ fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
+ fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
+ fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
+ fprintf(g_fpOfResult, "precision: %s\n", g_args.precision);
+ fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
+ fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
+ fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
+ fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
+ fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
+ fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
+ fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
+ fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
+
+ for (int32_t i = 0; i < g_args.arg_list_len; i++) {
+ fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ }
}
- }
- g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
-
- time_t tTime = time(NULL);
- struct tm tm = *localtime(&tTime);
-
- if (g_args.isDumpIn) {
- fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
- fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (taosDumpIn(&g_args) < 0) {
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
- return -1;
- }
- } else {
- fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
- fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (taosDumpOut(&g_args) < 0) {
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
- return -1;
- }
-
- fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
- fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut);
- fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut);
- }
+ g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
+ time_t tTime = time(NULL);
+ struct tm tm = *localtime(&tTime);
- return 0;
+ if (g_args.isDumpIn) {
+ fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
+ fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (taosDumpIn() < 0) {
+ ret = -1;
+ }
+ } else {
+ fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
+ fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (taosDumpOut() < 0) {
+ ret = -1;
+ } else {
+ fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
+ fprintf(g_fpOfResult, "# total database count: %d\n",
+ g_resultStatistics.totalDatabasesOfDumpOut);
+ fprintf(g_fpOfResult, "# total super table count: %d\n",
+ g_resultStatistics.totalSuperTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n",
+ g_resultStatistics.totalChildTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# total row count: %"PRId64"\n",
+ g_resultStatistics.totalRowsOfDumpOut);
+ }
+ }
+
+ fprintf(g_fpOfResult, "\n");
+ fclose(g_fpOfResult);
+
+ return ret;
}
-void taosFreeDbInfos() {
- if (dbInfos == NULL) return;
- for (int i = 0; i < 128; i++) tfree(dbInfos[i]);
- tfree(dbInfos);
+static void taosFreeDbInfos() {
+ if (g_dbInfos == NULL) return;
+ for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]);
+ tfree(g_dbInfos);
}
// check table is normal table or super table
-int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) {
- TAOS_ROW row = NULL;
- bool isSet = false;
- TAOS_RES *result = NULL;
+static int taosGetTableRecordInfo(
+ char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) {
+ TAOS_ROW row = NULL;
+ bool isSet = false;
+ TAOS_RES *result = NULL;
+
+ memset(pTableRecordInfo, 0, sizeof(STableRecordInfo));
+
+ char* tempCommand = (char *)malloc(COMMAND_SIZE);
+ if (tempCommand == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- memset(pTableRecordInfo, 0, sizeof(STableRecordInfo));
+ sprintf(tempCommand, "show tables like %s", table);
- char* tempCommand = (char *)malloc(COMMAND_SIZE);
- if (tempCommand == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+ result = taos_query(taosCon, tempCommand);
+ int32_t code = taos_errno(result);
- sprintf(tempCommand, "show tables like %s", table);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tempCommand);
+ free(tempCommand);
+ taos_free_result(result);
+ return -1;
+ }
- result = taos_query(taosCon, tempCommand);
- int32_t code = taos_errno(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+
+ while ((row = taos_fetch_row(result)) != NULL) {
+ isSet = true;
+ pTableRecordInfo->isMetric = false;
+ strncpy(pTableRecordInfo->tableRecord.name,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ strncpy(pTableRecordInfo->tableRecord.metric,
+ (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+ break;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tempCommand);
- free(tempCommand);
taos_free_result(result);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(result);
+ result = NULL;
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isMetric = false;
- strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
- break;
- }
+ if (isSet) {
+ free(tempCommand);
+ return 0;
+ }
- taos_free_result(result);
- result = NULL;
+ sprintf(tempCommand, "show stables like %s", table);
- if (isSet) {
- free(tempCommand);
- return 0;
- }
+ result = taos_query(taosCon, tempCommand);
+ code = taos_errno(result);
- sprintf(tempCommand, "show stables like %s", table);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tempCommand);
+ free(tempCommand);
+ taos_free_result(result);
+ return -1;
+ }
- result = taos_query(taosCon, tempCommand);
- code = taos_errno(result);
+ while ((row = taos_fetch_row(result)) != NULL) {
+ isSet = true;
+ pTableRecordInfo->isMetric = true;
+ tstrncpy(pTableRecordInfo->tableRecord.metric, table,
+ TSDB_TABLE_NAME_LEN);
+ break;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tempCommand);
- free(tempCommand);
taos_free_result(result);
- return -1;
- }
+ result = NULL;
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isMetric = true;
- tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN);
- break;
- }
-
- taos_free_result(result);
- result = NULL;
-
- if (isSet) {
+ if (isSet) {
+ free(tempCommand);
+ return 0;
+ }
+ errorPrint("%s() LN%d, invalid table/metric %s\n",
+ __func__, __LINE__, table);
free(tempCommand);
- return 0;
- }
- fprintf(stderr, "invalid table/metric %s\n", table);
- free(tempCommand);
- return -1;
+ return -1;
}
-int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) {
- STableRecord tableRecord;
+static int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter,
+ char* metric, int* fd) {
+ STableRecord tableRecord;
- if (-1 == *fd) {
- *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (*fd == -1) {
- fprintf(stderr, "failed to open temp file: .tables.tmp.0\n");
- return -1;
+ if (-1 == *fd) {
+ *fd = open(".tables.tmp.0",
+ O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (*fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: .tables.tmp.0\n",
+ __func__, __LINE__);
+ return -1;
+ }
}
- }
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
- tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
+ tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
- taosWrite(*fd, &tableRecord, sizeof(STableRecord));
- return 0;
+ taosWrite(*fd, &tableRecord, sizeof(STableRecord));
+ return 0;
}
+static int32_t taosSaveTableOfMetricToTempFile(
+ TAOS *taosCon, char* metric,
+ int32_t* totalNumOfThread) {
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
-int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) {
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
-
- char* tmpCommand = (char *)malloc(COMMAND_SIZE);
- if (tmpCommand == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+ char* tmpCommand = (char *)malloc(COMMAND_SIZE);
+ if (tmpCommand == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ return -1;
+ }
- sprintf(tmpCommand, "select tbname from %s", metric);
+ sprintf(tmpCommand, "select tbname from %s", metric);
- TAOS_RES *res = taos_query(taosCon, tmpCommand);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tmpCommand);
+ TAOS_RES *res = taos_query(taosCon, tmpCommand);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tmpCommand);
+ free(tmpCommand);
+ taos_free_result(res);
+ return -1;
+ }
free(tmpCommand);
- taos_free_result(res);
- return -1;
- }
- free(tmpCommand);
-
- char tmpBuf[TSDB_FILENAME_LEN + 1];
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".select-tbname.tmp");
- fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- taos_free_result(res);
- return -1;
- }
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ char tmpBuf[MAX_FILE_NAME_LEN];
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ taos_free_result(res);
+ return -1;
+ }
- int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
- tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+ int32_t numOfTable = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
- numOfTable++;
- }
- taos_free_result(res);
- lseek(fd, 0, SEEK_SET);
-
- int maxThreads = arguments->thread_num;
- int tableOfPerFile ;
- if (numOfTable <= arguments->thread_num) {
- tableOfPerFile = 1;
- maxThreads = numOfTable;
- } else {
- tableOfPerFile = numOfTable / arguments->thread_num;
- if (0 != numOfTable % arguments->thread_num) {
- tableOfPerFile += 1;
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
+ tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ numOfTable++;
}
- }
+ taos_free_result(res);
+ lseek(fd, 0, SEEK_SET);
- char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
- if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
- close(fd);
- return -1;
- }
+ int maxThreads = g_args.thread_num;
+    int tableOfPerFile;
+ if (numOfTable <= g_args.thread_num) {
+ tableOfPerFile = 1;
+ maxThreads = numOfTable;
+ } else {
+ tableOfPerFile = numOfTable / g_args.thread_num;
+ if (0 != numOfTable % g_args.thread_num) {
+ tableOfPerFile += 1;
+ }
+ }
- int32_t numOfThread = *totalNumOfThread;
- int subFd = -1;
- for (; numOfThread < maxThreads; numOfThread++) {
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
- subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (subFd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
- (void)remove(tmpBuf);
- }
- sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
- free(tblBuf);
- close(fd);
- return -1;
+ char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
+ if (NULL == tblBuf){
+ errorPrint("%s() LN%d, failed to calloc %" PRIzu "\n",
+ __func__, __LINE__, tableOfPerFile * sizeof(STableRecord));
+ close(fd);
+ return -1;
}
- // read tableOfPerFile for fd, write to subFd
- ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
- if (readLen <= 0) {
- close(subFd);
- break;
+ int32_t numOfThread = *totalNumOfThread;
+ int subFd = -1;
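+    // fill each per-thread temp file .tables.tmp.N with up to tableOfPerFile records read from fd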
+ for (; numOfThread < maxThreads; numOfThread++) {
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
+ subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (subFd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ (void)remove(tmpBuf);
+ free(tblBuf);
+ close(fd);
+ return -1;
+ }
+
+        // read up to tableOfPerFile records from fd, write them to subFd
+ ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
+ if (readLen <= 0) {
+ close(subFd);
+ break;
+ }
+ taosWrite(subFd, tblBuf, readLen);
+ close(subFd);
}
- taosWrite(subFd, tblBuf, readLen);
- close(subFd);
- }
- sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ (void)remove(tmpBuf);
- if (fd >= 0) {
- close(fd);
- fd = -1;
- }
+ if (fd >= 0) {
+ close(fd);
+ fd = -1;
+ }
- *totalNumOfThread = numOfThread;
+ *totalNumOfThread = numOfThread;
- free(tblBuf);
- return 0;
+ free(tblBuf);
+ return 0;
}
-int taosDumpOut(struct arguments *arguments) {
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
- char *command = NULL;
-
- TAOS_ROW row;
- FILE *fp = NULL;
- int32_t count = 0;
- STableRecordInfo tableRecordInfo;
-
- char tmpBuf[TSDB_FILENAME_LEN+9] = {0};
- if (arguments->outpath[0] != 0) {
- sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath);
- } else {
- sprintf(tmpBuf, "dbs.sql");
- }
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- return -1;
- }
+static int taosDumpOut() {
+ TAOS *taos = NULL;
+ TAOS_RES *result = NULL;
+ char *command = NULL;
- dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *));
- if (dbInfos == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
+ TAOS_ROW row;
+ FILE *fp = NULL;
+ int32_t count = 0;
+ STableRecordInfo tableRecordInfo;
- command = (char *)malloc(COMMAND_SIZE);
- if (command == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
+ char tmpBuf[4096] = {0};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
+ } else {
+ sprintf(tmpBuf, "dbs.sql");
+ }
- /* Connect to server */
- taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port);
- if (taos == NULL) {
- fprintf(stderr, "failed to connect to TDengine server\n");
- goto _exit_failure;
- }
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
- /* --------------------------------- Main Code -------------------------------- */
- /* if (arguments->databases || arguments->all_databases) { // dump part of databases or all databases */
- /* */
- taosDumpCharset(fp);
+ g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *));
+ if (g_dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ goto _exit_failure;
+ }
- sprintf(command, "show databases");
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
+ command = (char *)malloc(COMMAND_SIZE);
+ if (command == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ goto _exit_failure;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
- goto _exit_failure;
- }
+ /* Connect to server */
+ taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ goto _exit_failure;
+ }
- TAOS_FIELD *fields = taos_fetch_fields(result);
+ /* --------------------------------- Main Code -------------------------------- */
+ /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
+ /* */
+ taosDumpCharset(fp);
- while ((row = taos_fetch_row(result)) != NULL) {
- // sys database name : 'log', but subsequent version changed to 'log'
- if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 &&
- (!arguments->allow_sys))
- continue;
+ sprintf(command, "show databases");
+ result = taos_query(taos, command);
+ int32_t code = taos_errno(result);
- if (arguments->databases) { // input multi dbs
- for (int i = 0; arguments->arg_list[i]; i++) {
- if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
- }
- continue;
- } else if (!arguments->all_databases) { // only input one db
- if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
- else
- continue;
- }
-
- _dump_db_point:
-
- dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (dbInfos[count] == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
-
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
- if (arguments->with_property) {
- dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
- //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
- //dbInfos[count]->daysToKeep1;
- //dbInfos[count]->daysToKeep2;
- dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
- //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
- dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- }
- count++;
-
- if (arguments->databases) {
- if (count > arguments->arg_list_len) break;
-
- } else if (!arguments->all_databases) {
- if (count >= 1) break;
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(result));
+ goto _exit_failure;
}
- }
- if (count == 0) {
- fprintf(stderr, "No databases valid to dump\n");
- goto _exit_failure;
- }
+ TAOS_FIELD *fields = taos_fetch_fields(result);
- if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases
- for (int i = 0; i < count; i++) {
- taosDumpDb(dbInfos[i], arguments, fp, taos);
- }
- } else {
- if (arguments->arg_list_len == 1) { // case: taosdump
- taosDumpDb(dbInfos[0], arguments, fp, taos);
- } else { // case: taosdump tablex tabley ...
- taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp);
- fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfos[0]->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+ while ((row = taos_fetch_row(result)) != NULL) {
+        // skip the system database 'log' unless allow_sys is set
+ if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ && (!g_args.allow_sys)) {
+ continue;
+ }
- sprintf(command, "use %s", dbInfos[0]->name);
+ if (g_args.databases) { // input multi dbs
+ for (int i = 0; g_args.arg_list[i]; i++) {
+ if (strncasecmp(g_args.arg_list[i],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ goto _dump_db_point;
+ }
+ continue;
+ } else if (!g_args.all_databases) { // only input one db
+ if (strncasecmp(g_args.arg_list[0],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ goto _dump_db_point;
+ else
+ continue;
+ }
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
- goto _exit_failure;
- }
+_dump_db_point:
- fprintf(fp, "USE %s;\n\n", dbInfos[0]->name);
+ g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (g_dbInfos[count] == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
+ goto _exit_failure;
+ }
- int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0
- int normalTblFd = -1;
- int32_t retCode;
- int superTblCnt = 0 ;
- for (int i = 1; arguments->arg_list[i]; i++) {
- if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) {
- fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]);
- continue;
+ strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ if (g_args.with_property) {
+ g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ g_dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
+ //g_dbInfos[count]->daysToKeep1;
+ //g_dbInfos[count]->daysToKeep2;
+ g_dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ g_dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ g_dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ g_dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ g_dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ g_dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
+ g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
+ count++;
- if (tableRecordInfo.isMetric) { // dump all table of this metric
- int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
- if (0 == ret) {
- superTblCnt++;
- }
- retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread);
- } else {
- if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric
- int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
- if (0 == ret) {
- superTblCnt++;
- }
- }
- retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
+ if (g_args.databases) {
+ if (count > g_args.arg_list_len) break;
+
+ } else if (!g_args.all_databases) {
+ if (count >= 1) break;
}
+ }
+
+ if (count == 0) {
+ errorPrint("%d databases valid to dump\n", count);
+ goto _exit_failure;
+ }
- if (retCode < 0) {
- if (-1 != normalTblFd){
- taosClose(normalTblFd);
- }
- goto _clean_tmp_file;
+ if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases
+ for (int i = 0; i < count; i++) {
+ taosDumpDb(g_dbInfos[i], fp, taos);
}
- }
+ } else {
+ if (g_args.arg_list_len == 1) { // case: taosdump
+ taosDumpDb(g_dbInfos[0], fp, taos);
+ } else { // case: taosdump tablex tabley ...
+ taosDumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ g_dbInfos[0]->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
+
+ sprintf(command, "use %s", g_dbInfos[0]->name);
+
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("invalid database %s\n", g_dbInfos[0]->name);
+ goto _exit_failure;
+ }
- // TODO: save dump super table into result_output.txt
- fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+ fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name);
+
+            int32_t totalNumOfThread = 1;  // 0: all normal tables go into .tables.tmp.0
+ int normalTblFd = -1;
+ int32_t retCode;
+            int superTblCnt = 0;
+ for (int i = 1; g_args.arg_list[i]; i++) {
+ if (taosGetTableRecordInfo(g_args.arg_list[i],
+ &tableRecordInfo, taos) < 0) {
+                errorPrint("invalid input table %s\n",
+ g_args.arg_list[i]);
+ continue;
+ }
+
+                if (tableRecordInfo.isMetric) {  // dump all tables of this metric
+ int ret = taosDumpStable(
+ tableRecordInfo.tableRecord.metric,
+ fp, taos, g_dbInfos[0]->name);
+ if (0 == ret) {
+ superTblCnt++;
+ }
+ retCode = taosSaveTableOfMetricToTempFile(
+ taos, tableRecordInfo.tableRecord.metric,
+ &totalNumOfThread);
+ } else {
+                    if (tableRecordInfo.tableRecord.metric[0] != '\0') {  // dump this sub table and its metric
+ int ret = taosDumpStable(
+ tableRecordInfo.tableRecord.metric,
+ fp, taos, g_dbInfos[0]->name);
+ if (0 == ret) {
+ superTblCnt++;
+ }
+ }
+ retCode = taosSaveAllNormalTableToTempFile(
+ taos, tableRecordInfo.tableRecord.name,
+ tableRecordInfo.tableRecord.metric, &normalTblFd);
+ }
+
+ if (retCode < 0) {
+ if (-1 != normalTblFd){
+ taosClose(normalTblFd);
+ }
+ goto _clean_tmp_file;
+ }
+ }
- if (-1 != normalTblFd){
- taosClose(normalTblFd);
- }
+            // TODO: save dumped super table count into result_output.txt
+ fprintf(g_fpOfResult, "# super table counter: %d\n",
+ superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- // start multi threads to dumpout
- taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name);
+ if (-1 != normalTblFd){
+ taosClose(normalTblFd);
+ }
- char tmpFileName[TSDB_FILENAME_LEN + 1];
- _clean_tmp_file:
- for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) {
- sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
- remove(tmpFileName);
- }
+            // start multiple threads to dump out
+ taosStartDumpOutWorkThreads(totalNumOfThread,
+ g_dbInfos[0]->name);
+
+ char tmpFileName[MAX_FILE_NAME_LEN];
+_clean_tmp_file:
+ for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) {
+ sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
+ remove(tmpFileName);
+ }
+ }
}
- }
- /* Close the handle and return */
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- tfree(command);
- taosFreeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
- return 0;
+ /* Close the handle and return */
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ tfree(command);
+ taosFreeDbInfos();
+ fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return 0;
_exit_failure:
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- tfree(command);
- taosFreeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
- return -1;
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ tfree(command);
+ taosFreeDbInfos();
+ errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return -1;
}
-int taosGetTableDes(
+static int taosGetTableDes(
char* dbName, char *table,
STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
- TAOS_ROW row = NULL;
- TAOS_RES* res = NULL;
- int count = 0;
+ TAOS_ROW row = NULL;
+ TAOS_RES* res = NULL;
+ int count = 0;
- char sqlstr[COMMAND_SIZE];
- sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ char sqlstr[COMMAND_SIZE];
+ sprintf(sqlstr, "describe %s.%s;", dbName, table);
- tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
- while ((row = taos_fetch_row(res)) != NULL) {
- strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
- tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
-
- count++;
- }
+ res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- taos_free_result(res);
- res = NULL;
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
+ while ((row = taos_fetch_row(res)) != NULL) {
+ strncpy(tableDes->cols[count].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ strncpy(tableDes->cols[count].type,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
+ tableDes->cols[count].length =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ strncpy(tableDes->cols[count].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+
+ count++;
+ }
- if (isSuperTable) {
- return count;
- }
+ taos_free_result(res);
+ res = NULL;
- // if chidl-table have tag, using select tagName from table to get tagValue
- for (int i = 0 ; i < count; i++) {
- if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
+ if (isSuperTable) {
+ return count;
+ }
+    // if the child table has tags, use "select tagName from table" to get each tag value
+    for (int i = 0; i < count; i++) {
+ if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
- sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
- res = taos_query(taosCon, sqlstr);
- code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
+ sprintf(sqlstr, "select %s from %s.%s",
+ tableDes->cols[i].field, dbName, table);
- fields = taos_fetch_fields(res);
+ res = taos_query(taosCon, sqlstr);
+ code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- row = taos_fetch_row(res);
- if (NULL == row) {
- fprintf(stderr, " fetch failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
+ fields = taos_fetch_fields(res);
- if (row[0] == NULL) {
- sprintf(tableDes->cols[i].note, "%s", "NULL");
- taos_free_result(res);
- res = NULL;
- continue;
- }
+ row = taos_fetch_row(res);
+ if (NULL == row) {
+ errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- int32_t* length = taos_fetch_lengths(res);
+ if (row[0] == NULL) {
+ sprintf(tableDes->cols[i].note, "%s", "NULL");
+ taos_free_result(res);
+ res = NULL;
+ continue;
+ }
- //int32_t* length = taos_fetch_lengths(tmpResult);
- switch (fields[0].type) {
- case TSDB_DATA_TYPE_BOOL:
- sprintf(tableDes->cols[i].note, "%d", ((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_INT:
- sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
- break;
- case TSDB_DATA_TYPE_BINARY: {
- memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- tableDes->cols[i].note[0] = '\'';
- char tbuf[COL_NOTE_LEN];
- converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
- char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
- *(pstr++) = '\'';
- break;
- }
- case TSDB_DATA_TYPE_NCHAR: {
- memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
- convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
- sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
- #if 0
- if (!arguments->mysqlFlag) {
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[0]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000));
+ int32_t* length = taos_fetch_lengths(res);
+
+ //int32_t* length = taos_fetch_lengths(tmpResult);
+ switch (fields[0].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ sprintf(tableDes->cols[i].note, "%d",
+ ((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
+ tableDes->cols[i].note[0] = '\'';
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
+ char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
+ *(pstr++) = '\'';
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
+                char tbuf[COL_NOTE_LEN-2];   // reserve 2 bytes for the enclosing single quotes
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
+ sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
+#if 0
+ if (!g_args.mysqlFlag) {
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[0]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000));
+ }
+#endif
+ break;
+ default:
+ break;
}
- #endif
- break;
- default:
- break;
+
+ taos_free_result(res);
+ res = NULL;
}
- taos_free_result(res);
- res = NULL;
- }
+ return count;
+}
- return count;
+static int convertSchemaToAvroSchema(STableDef *tableDes, char **avroSchema)
+{
+    errorPrint("%s() LN%d TODO: convert table schema to avro schema\n",
+ __func__, __LINE__);
+ return 0;
}
-int32_t taosDumpTable(
- char *table, char *metric, struct arguments *arguments,
+static int32_t taosDumpTable(
+ char *table, char *metric,
FILE *fp, TAOS* taosCon, char* dbName) {
- int count = 0;
+ int count = 0;
- STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef)
+ + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table
- /*
- count = taosGetTableDes(metric, tableDes, taosCon);
+    if (metric != NULL && metric[0] != '\0') {  // dump the schema of a child table created from a super table
+ /*
+ count = taosGetTableDes(metric, tableDes, taosCon);
- if (count < 0) {
- free(tableDes);
- return -1;
- }
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
- taosDumpCreateTableClause(tableDes, count, fp);
+ taosDumpCreateTableClause(tableDes, count, fp);
- memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- */
+ memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ */
- count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
+ count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
- if (count < 0) {
- free(tableDes);
- return -1;
- }
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
- // create child-table using super-table
- taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName);
+ // create child-table using super-table
+ taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName);
- } else { // dump table definition
- count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
+ } else { // dump table definition
+ count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
- if (count < 0) {
- free(tableDes);
- return -1;
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
+
+ // create normal-table or super-table
+ taosDumpCreateTableClause(tableDes, count, fp, dbName);
}
- // create normal-table or super-table
- taosDumpCreateTableClause(tableDes, count, fp, dbName);
- }
+ char *jsonAvroSchema = NULL;
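+    // if avro output is requested, convert the table schema (conversion is still a TODO stub)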
+ if (g_args.avro) {
+ convertSchemaToAvroSchema(tableDes, &jsonAvroSchema);
+ }
+
+ free(tableDes);
- free(tableDes);
+ int32_t ret = 0;
+ if (!g_args.schemaonly) {
+ ret = taosDumpTableData(fp, table, taosCon, dbName,
+ jsonAvroSchema);
+ }
- return taosDumpTableData(fp, table, arguments, taosCon, dbName);
+ return ret;
}
-void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
-
- char *pstr = sqlstr;
- pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
- if (isDumpProperty) {
- pstr += sprintf(pstr,
- "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
- dbInfo->comp, dbInfo->precision, dbInfo->update);
- }
+static void taosDumpCreateDbClause(
+ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+
+ char *pstr = sqlstr;
+ pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
+ if (isDumpProperty) {
+ pstr += sprintf(pstr,
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days,
+ dbInfo->keeplist,
+ dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
+ dbInfo->fsync,
+ dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
+ }
- pstr += sprintf(pstr, ";");
- fprintf(fp, "%s\n\n", sqlstr);
+ pstr += sprintf(pstr, ";");
+ fprintf(fp, "%s\n\n", sqlstr);
}
-void* taosDumpOutWorkThreadFp(void *arg)
+static void* taosDumpOutWorkThreadFp(void *arg)
{
- SThreadParaObj *pThread = (SThreadParaObj*)arg;
- STableRecord tableRecord;
- int fd;
-
- char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
- sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
- fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf);
- return NULL;
- }
-
- FILE *fp = NULL;
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+ SThreadParaObj *pThread = (SThreadParaObj*)arg;
+ STableRecord tableRecord;
+ int fd;
+
+ char tmpBuf[4096] = {0};
+ sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
+ fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
+ }
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
- }
+ FILE *fp = NULL;
+ memset(tmpBuf, 0, 4096);
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- close(fd);
- return NULL;
- }
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.tables.%d.sql",
+ g_args.outpath, pThread->dbName, pThread->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.tables.%d.sql",
+ pThread->dbName, pThread->threadIndex);
+ }
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, "use %s", pThread->dbName);
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ close(fd);
+ return NULL;
+ }
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
- int32_t code = taos_errno(tmpResult);
- if (code != 0) {
- fprintf(stderr, "invalid database %s\n", pThread->dbName);
- taos_free_result(tmpResult);
- fclose(fp);
- close(fd);
- return NULL;
- }
+ memset(tmpBuf, 0, 4096);
+ sprintf(tmpBuf, "use %s", pThread->dbName);
- int fileNameIndex = 1;
- int tablesInOneFile = 0;
- int64_t lastRowsPrint = 5000000;
- fprintf(fp, "USE %s;\n\n", pThread->dbName);
- while (1) {
- ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
- if (readLen <= 0) break;
-
- int ret = taosDumpTable(
- tableRecord.name, tableRecord.metric, &g_args,
- fp, pThread->taosCon, pThread->dbName);
- if (ret >= 0) {
- // TODO: sum table count and table rows by self
- pThread->tablesOfDumpOut++;
- pThread->rowsOfDumpOut += ret;
-
- if (pThread->rowsOfDumpOut >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from database %s\n",
- pThread->rowsOfDumpOut, pThread->dbName);
- lastRowsPrint += 5000000;
- }
-
- tablesInOneFile++;
- if (tablesInOneFile >= g_args.table_batch) {
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+ int32_t code = taos_errno(tmpResult);
+ if (code != 0) {
+ errorPrint("%s() LN%d, invalid database %s. reason: %s\n",
+ __func__, __LINE__, pThread->dbName, taos_errstr(tmpResult));
+ taos_free_result(tmpResult);
fclose(fp);
- tablesInOneFile = 0;
+ close(fd);
+ return NULL;
+ }
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
- g_args.outpath, pThread->dbName,
- pThread->threadIndex, fileNameIndex);
- } else {
- sprintf(tmpBuf, "%s.tables.%d-%d.sql",
- pThread->dbName, pThread->threadIndex, fileNameIndex);
- }
- fileNameIndex++;
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- close(fd);
- taos_free_result(tmpResult);
- return NULL;
+#if 0
+ int fileNameIndex = 1;
+ int tablesInOneFile = 0;
+#endif
+ int64_t lastRowsPrint = 5000000;
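+    // report progress every 5,000,000 rows dumped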
+ fprintf(fp, "USE %s;\n\n", pThread->dbName);
+ while (1) {
+ ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
+ if (readLen <= 0) break;
+
+ int ret = taosDumpTable(
+ tableRecord.name, tableRecord.metric,
+ fp, pThread->taosCon, pThread->dbName);
+ if (ret >= 0) {
+            // TODO: accumulate table count and row count here
+ pThread->tablesOfDumpOut++;
+ pThread->rowsOfDumpOut += ret;
+
+ if (pThread->rowsOfDumpOut >= lastRowsPrint) {
+                printf("  %"PRId64 " rows already dumped out from database %s\n",
+ pThread->rowsOfDumpOut, pThread->dbName);
+ lastRowsPrint += 5000000;
+ }
+
+#if 0
+ tablesInOneFile++;
+ if (tablesInOneFile >= g_args.table_batch) {
+ fclose(fp);
+ tablesInOneFile = 0;
+
+ memset(tmpBuf, 0, 4096);
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
+ g_args.outpath, pThread->dbName,
+ pThread->threadIndex, fileNameIndex);
+ } else {
+ sprintf(tmpBuf, "%s.tables.%d-%d.sql",
+ pThread->dbName, pThread->threadIndex, fileNameIndex);
+ }
+ fileNameIndex++;
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ close(fd);
+ taos_free_result(tmpResult);
+ return NULL;
+ }
+ }
+#endif
}
- }
}
- }
- taos_free_result(tmpResult);
- close(fd);
- fclose(fp);
+ taos_free_result(tmpResult);
+ close(fd);
+ fclose(fp);
- return NULL;
+ return NULL;
}
-static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
+static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
{
- pthread_attr_t thattr;
- SThreadParaObj *threadObj =
- (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
- for (int t = 0; t < numOfThread; ++t) {
- SThreadParaObj *pThread = threadObj + t;
- pThread->rowsOfDumpOut = 0;
- pThread->tablesOfDumpOut = 0;
- pThread->threadIndex = t;
- pThread->totalThreads = numOfThread;
- tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
- pThread->taosCon = taosCon;
-
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
-
- if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) {
- fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex);
- exit(0);
+ pthread_attr_t thattr;
+ SThreadParaObj *threadObj =
+ (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
+
+ if (threadObj == NULL) {
+ errorPrint("%s() LN%d, memory allocation failed!\n",
+ __func__, __LINE__);
+ return;
}
- }
- for (int32_t t = 0; t < numOfThread; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
+ for (int t = 0; t < numOfThread; ++t) {
+ SThreadParaObj *pThread = threadObj + t;
+ pThread->rowsOfDumpOut = 0;
+ pThread->tablesOfDumpOut = 0;
+ pThread->threadIndex = t;
+ pThread->totalThreads = numOfThread;
+ tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
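+        // each worker thread opens its own connection instead of sharing one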
+ pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taosCon == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ free(threadObj);
+ return;
+ }
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&(pThread->threadID), &thattr,
+ taosDumpOutWorkThreadFp,
+ (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread:%d failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(-1);
+ }
+ }
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
- int64_t totalRowsOfDumpOut = 0;
- int64_t totalChildTblsOfDumpOut = 0;
- for (int32_t t = 0; t < numOfThread; ++t) {
- totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut;
- totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut;
- }
+ for (int32_t t = 0; t < numOfThread; ++t) {
+ pthread_join(threadObj[t].threadID, NULL);
+ }
+
+    // TODO: sum the dumped table and row counts across all threads, then save them into result_output.txt
+ int64_t totalRowsOfDumpOut = 0;
+ int64_t totalChildTblsOfDumpOut = 0;
+ for (int32_t t = 0; t < numOfThread; ++t) {
+ totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut;
+ totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut;
+ }
- fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", totalChildTblsOfDumpOut);
- fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", totalRowsOfDumpOut);
- g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut;
- g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut;
- free(threadObj);
+ fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n",
+ totalChildTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# row counter: %"PRId64"\n",
+ totalRowsOfDumpOut);
+ g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut;
+ g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut;
+ free(threadObj);
}
+static int32_t taosDumpStable(char *table, FILE *fp,
+ TAOS* taosCon, char* dbName) {
+ uint64_t sizeOfTableDes = (uint64_t)(sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ STableDef *tableDes = (STableDef *)calloc(1, sizeOfTableDes);
+ if (NULL == tableDes) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, sizeOfTableDes);
+ exit(-1);
+ }
-int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
- int count = 0;
+ int count = taosGetTableDes(dbName, table, tableDes, taosCon, true);
- STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- if (NULL == tableDes) {
- fprintf(stderr, "failed to allocate memory\n");
- exit(-1);
- }
+ if (count < 0) {
+ free(tableDes);
+ errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
+ __func__, __LINE__, table);
+ exit(-1);
+ }
- count = taosGetTableDes(dbName, table, tableDes, taosCon, true);
+ taosDumpCreateTableClause(tableDes, count, fp, dbName);
- if (count < 0) {
free(tableDes);
- fprintf(stderr, "failed to get stable[%s] schema\n", table);
- exit(-1);
- }
-
- taosDumpCreateTableClause(tableDes, count, fp, dbName);
-
- free(tableDes);
- return 0;
+ return 0;
}
-
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- sprintf(sqlstr, "show %s.stables", dbName);
+ sprintf(sqlstr, "show %s.stables", dbName);
- TAOS_RES* res = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- exit(-1);
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ exit(-1);
+ }
- char tmpFileName[TSDB_FILENAME_LEN + 1];
- memset(tmpFileName, 0, TSDB_FILENAME_LEN);
- sprintf(tmpFileName, ".stables.tmp");
- fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpFileName);
- taos_free_result(res);
- (void)remove(".stables.tmp");
- exit(-1);
- }
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ char tmpFileName[MAX_FILE_NAME_LEN];
+ memset(tmpFileName, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpFileName, ".stables.tmp");
+ fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpFileName);
+ taos_free_result(res);
+ (void)remove(".stables.tmp");
+ exit(-1);
+ }
- while ((row = taos_fetch_row(res)) != NULL) {
- memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
+ while ((row = taos_fetch_row(res)) != NULL) {
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ }
- taos_free_result(res);
- (void)lseek(fd, 0, SEEK_SET);
+ taos_free_result(res);
+ (void)lseek(fd, 0, SEEK_SET);
- int superTblCnt = 0;
- while (1) {
- ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
- if (readLen <= 0) break;
+ int superTblCnt = 0;
+ while (1) {
+ ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
+ if (readLen <= 0) break;
- int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
- if (0 == ret) {
- superTblCnt++;
+ int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
+ if (0 == ret) {
+ superTblCnt++;
+ }
}
- }
- // TODO: save dump super table into result_output.txt
- fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+    // TODO: save dumped super table count into result_output.txt
+ fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- close(fd);
- (void)remove(".stables.tmp");
+ close(fd);
+ (void)remove(".stables.tmp");
- return 0;
+ return 0;
}
-int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) {
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
+static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
- taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
+ taosDumpCreateDbClause(dbInfo, g_args.with_property, fp);
- fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ dbInfo->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- fprintf(fp, "USE %s;\n\n", dbInfo->name);
+ fprintf(fp, "USE %s;\n\n", dbInfo->name);
- (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
+ (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
- sprintf(sqlstr, "show %s.tables", dbInfo->name);
+ sprintf(sqlstr, "show %s.tables", dbInfo->name);
- TAOS_RES* res = taos_query(taosCon, sqlstr);
- int code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
-
- char tmpBuf[TSDB_FILENAME_LEN + 1];
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".show-tables.tmp");
- fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- taos_free_result(res);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
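+ // spool the full table list into a temp file so it can be split among dump-out worker threads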
+ char tmpBuf[MAX_FILE_NAME_LEN];
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".show-tables.tmp");
+ fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ taos_free_result(res);
+ return -1;
+ }
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- numOfTable++;
- }
- taos_free_result(res);
- lseek(fd, 0, SEEK_SET);
-
- int maxThreads = g_args.thread_num;
- int tableOfPerFile ;
- if (numOfTable <= g_args.thread_num) {
- tableOfPerFile = 1;
- maxThreads = numOfTable;
- } else {
- tableOfPerFile = numOfTable / g_args.thread_num;
- if (0 != numOfTable % g_args.thread_num) {
- tableOfPerFile += 1;
- }
- }
+ int32_t numOfTable = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
- char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
- if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
- close(fd);
- return -1;
- }
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
- int32_t numOfThread = 0;
- int subFd = -1;
- for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
- subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (subFd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
- (void)remove(tmpBuf);
- }
- sprintf(tmpBuf, ".show-tables.tmp");
- (void)remove(tmpBuf);
- free(tblBuf);
- close(fd);
- return -1;
+ numOfTable++;
}
+ taos_free_result(res);
+ lseek(fd, 0, SEEK_SET);
- // read tableOfPerFile for fd, write to subFd
- ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
- if (readLen <= 0) {
- close(subFd);
- break;
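+ // split numOfTable tables across at most thread_num files: each gets ceil(numOfTable / thread_num) records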
+ int maxThreads = g_args.thread_num;
+ int tableOfPerFile;
+ if (numOfTable <= g_args.thread_num) {
+ tableOfPerFile = 1;
+ maxThreads = numOfTable;
+ } else {
+ tableOfPerFile = numOfTable / g_args.thread_num;
+ if (0 != numOfTable % g_args.thread_num) {
+ tableOfPerFile += 1;
+ }
}
- taosWrite(subFd, tblBuf, readLen);
- close(subFd);
- }
- sprintf(tmpBuf, ".show-tables.tmp");
- (void)remove(tmpBuf);
+ char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
+ if (NULL == tblBuf) {
+ errorPrint("failed to calloc %" PRIzu "\n",
+ tableOfPerFile * sizeof(STableRecord));
+ close(fd);
+ return -1;
+ }
- if (fd >= 0) {
- close(fd);
- fd = -1;
- }
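+ // distribute the spooled table records into .tables.tmp.N files, one per dump-out worker thread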
+ int32_t numOfThread = 0;
+ int subFd = -1;
+ for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
+ subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (subFd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+ sprintf(tmpBuf, ".show-tables.tmp");
+ (void)remove(tmpBuf);
+ free(tblBuf);
+ close(fd);
+ return -1;
+ }
- taos_free_result(res);
+ // read tableOfPerFile records from fd and write them to subFd
+ ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
+ if (readLen <= 0) {
+ close(subFd);
+ break;
+ }
+ taosWrite(subFd, tblBuf, readLen);
+ close(subFd);
+ }
- // start multi threads to dumpout
- taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name);
- for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ sprintf(tmpBuf, ".show-tables.tmp");
(void)remove(tmpBuf);
- }
- free(tblBuf);
- return 0;
+ if (fd >= 0) {
+ close(fd);
+ fd = -1;
+ }
+
+ // start multiple threads to dump out
+ taosStartDumpOutWorkThreads(numOfThread, dbInfo->name);
+ for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+
+ free(tblBuf);
+ return 0;
}
-void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) {
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName) {
int counter = 0;
int count_temp = 0;
char sqlstr[COMMAND_SIZE];
@@ -1704,257 +1901,291 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) {
fprintf(fp, "%s\n\n", sqlstr);
}
-void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName) {
- int counter = 0;
- int count_temp = 0;
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
+ int numOfCols, FILE *fp, char* dbName) {
+ int counter = 0;
+ int count_temp = 0;
- char* tmpBuf = (char *)malloc(COMMAND_SIZE);
- if (tmpBuf == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return;
- }
+ char* tmpBuf = (char *)malloc(COMMAND_SIZE);
+ if (tmpBuf == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %d memory\n",
+ __func__, __LINE__, COMMAND_SIZE);
+ return;
+ }
- char *pstr = NULL;
- pstr = tmpBuf;
+ char *pstr = NULL;
+ pstr = tmpBuf;
- pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
- dbName, tableDes->name, dbName, metric);
+ pstr += sprintf(tmpBuf,
+ "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
+ dbName, tableDes->name, dbName, metric);
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
- }
+ for (; counter < numOfCols; counter++) {
+ if (tableDes->cols[counter].note[0] != '\0') break;
+ }
- assert(counter < numOfCols);
- count_temp = counter;
+ assert(counter < numOfCols);
+ count_temp = counter;
+
+ for (; counter < numOfCols; counter++) {
+ if (counter != count_temp) {
+ if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
+ strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
+ pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
+ } else {
+ pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
+ }
+ } else {
+ if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
+ strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
+ pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
+ } else {
+ pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
+ }
+ /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */
+ }
- for (; counter < numOfCols; counter++) {
- if (counter != count_temp) {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
- pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
- } else {
- pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
- }
- } else {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
- pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
- } else {
- pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
- }
- /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */
+ /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar")
+ * == 0) { */
+ /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */
+ /* } */
}
- /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar")
- * == 0) { */
- /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */
- /* } */
- }
-
- pstr += sprintf(pstr, ");");
+ pstr += sprintf(pstr, ");");
- fprintf(fp, "%s\n", tmpBuf);
- free(tmpBuf);
+ fprintf(fp, "%s\n", tmpBuf);
+ free(tmpBuf);
}
-int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) {
- int64_t lastRowsPrint = 5000000;
- int64_t totalRows = 0;
- int count = 0;
- char *pstr = NULL;
- TAOS_ROW row = NULL;
- int numFields = 0;
+static int writeSchemaToAvro(char *jsonAvroSchema)
+{
+ errorPrint("%s() LN%d, TODO: implement write schema to avro",
+ __func__, __LINE__);
+ return 0;
+}
- if (arguments->schemaonly) {
+static int64_t writeResultToAvro(TAOS_RES *res)
+{
+ errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__);
return 0;
- }
+}
- int32_t sql_buf_len = arguments->max_sql_len;
- char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
- if (tmpBuffer == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
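+// Render a query result set into batched INSERT statements written to fp;
+// a statement is terminated once data_batch rows accumulate or the SQL
+// buffer is nearly full.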
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+ int64_t totalRows = 0;
+
+ int32_t sql_buf_len = g_args.max_sql_len;
+ char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
+ if (tmpBuffer == NULL) {
+ errorPrint("failed to allocate %d memory\n", sql_buf_len + 128);
+ return -1;
+ }
- pstr = tmpBuffer;
+ char *pstr = tmpBuffer;
- char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbname, arguments->start_time, arguments->end_time);
+ TAOS_ROW row = NULL;
+ int numFields = 0;
+ int rowFlag = 0;
+ int64_t lastRowsPrint = 5000000;
+ int count = 0;
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(tmpResult);
- if (code != 0) {
- fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
- free(tmpBuffer);
- taos_free_result(tmpResult);
- return -1;
- }
+ numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- numFields = taos_field_count(tmpResult);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(tmpResult);
+ int32_t curr_sqlstr_len = 0;
+ int32_t total_sqlstr_len = 0;
- int rowFlag = 0;
- int32_t curr_sqlstr_len = 0;
- int32_t total_sqlstr_len = 0;
- count = 0;
- while ((row = taos_fetch_row(tmpResult)) != NULL) {
- pstr = tmpBuffer;
- curr_sqlstr_len = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ curr_sqlstr_len = 0;
- int32_t* length = taos_fetch_lengths(tmpResult); // act len
+ int32_t* length = taos_fetch_lengths(res); // act len
- if (count == 0) {
- total_sqlstr_len = 0;
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s.%s VALUES (", dbName, tbname);
- } else {
- if (arguments->mysqlFlag) {
- if (0 == rowFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- rowFlag++;
+ if (count == 0) {
+ total_sqlstr_len = 0;
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "INSERT INTO %s.%s VALUES (", dbName, tbName);
} else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ if (g_args.mysqlFlag) {
+ if (0 == rowFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ rowFlag++;
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ }
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ }
}
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- }
- }
- for (int col = 0; col < numFields; col++) {
- if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+ for (int col = 0; col < numFields; col++) {
+ if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
- if (row[col] == NULL) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
- continue;
- }
+ if (row[col] == NULL) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
+ continue;
+ }
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_INT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_BINARY: {
- char tbuf[COMMAND_SIZE] = {0};
- //*(pstr++) = '\'';
- converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- //pstr = stpcpy(pstr, tbuf);
- //*(pstr++) = '\'';
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
+ *((int64_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ //*(pstr++) = '\'';
+ converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
+ //pstr = stpcpy(pstr, tbuf);
+ //*(pstr++) = '\'';
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!g_args.mysqlFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
+ *(int64_t *)row[col]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[col]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
+ buf, (int)(ts % 1000));
+ }
+ break;
+ default:
+ break;
+ }
}
- case TSDB_DATA_TYPE_NCHAR: {
- char tbuf[COMMAND_SIZE] = {0};
- convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
+
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+
+ totalRows++;
+ count++;
+ fprintf(fp, "%s", tmpBuffer);
+
+ if (totalRows >= lastRowsPrint) {
+ printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
+ totalRows, dbName, tbName);
+ lastRowsPrint += 5000000;
+ }
+
+ total_sqlstr_len += curr_sqlstr_len;
+
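+ // close the current INSERT statement when the row batch is full or the SQL buffer is nearly exhausted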
+ if ((count >= g_args.data_batch)
+ || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
+ fprintf(fp, ";\n");
+ count = 0;
}
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!arguments->mysqlFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[col]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000));
- }
- break;
- default:
- break;
- }
}
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
+ debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
- totalRows++;
- count++;
- fprintf(fp, "%s", tmpBuffer);
+ fprintf(fp, "\n");
+ atomic_add_fetch_64(&g_totalDumpOutRows, totalRows);
+ free(tmpBuffer);
- if (totalRows >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
- lastRowsPrint += 5000000;
- }
+ return 0;
+}
- total_sqlstr_len += curr_sqlstr_len;
+static int taosDumpTableData(FILE *fp, char *tbName,
+ TAOS* taosCon, char* dbName,
+ char *jsonAvroSchema) {
+ int64_t totalRows = 0;
- if ((count >= arguments->data_batch)
- || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
- fprintf(fp, ";\n");
- count = 0;
- } //else {
- //fprintf(fp, "\\\n");
- //}
- }
+ char sqlstr[1024] = {0};
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ dbName, tbName, g_args.start_time, g_args.end_time);
- printf("total_sqlstr_len: %d\n", total_sqlstr_len);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("failed to run command %s, reason: %s\n",
+ sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- fprintf(fp, "\n");
- atomic_add_fetch_64(&totalDumpOutRows, totalRows);
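+ // choose the output writer: avro (currently a stub) or SQL INSERT statements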
+ if (g_args.avro) {
+ writeSchemaToAvro(jsonAvroSchema);
+ totalRows = writeResultToAvro(res);
+ } else {
+ totalRows = writeResultToSql(res, fp, dbName, tbName);
+ }
- taos_free_result(tmpResult);
- free(tmpBuffer);
- return totalRows;
+ taos_free_result(res);
+ return totalRows;
}
-int taosCheckParam(struct arguments *arguments) {
- if (arguments->all_databases && arguments->databases) {
- fprintf(stderr, "conflict option --all-databases and --databases\n");
- return -1;
- }
+static int taosCheckParam(struct arguments *arguments) {
+ if (g_args.all_databases && g_args.databases) {
+ fprintf(stderr, "conflict option --all-databases and --databases\n");
+ return -1;
+ }
- if (arguments->start_time > arguments->end_time) {
- fprintf(stderr, "start time is larger than end time\n");
- return -1;
- }
+ if (g_args.start_time > g_args.end_time) {
+ fprintf(stderr, "start time is larger than end time\n");
+ return -1;
+ }
- if (arguments->arg_list_len == 0) {
- if ((!arguments->all_databases) && (!arguments->isDumpIn)) {
- fprintf(stderr, "taosdump requires parameters\n");
- return -1;
+ if (g_args.arg_list_len == 0) {
+ if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
+ fprintf(stderr, "taosdump requires parameters\n");
+ return -1;
+ }
+ }
+ /*
+ if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
+ fprintf(stderr, "duplicate parameter input and output file path\n");
+ return -1;
+ }
+ */
+ if (!g_args.isDumpIn && g_args.encode != NULL) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
}
- }
-/*
- if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) {
- fprintf(stderr, "duplicate parameter input and output file path\n");
- return -1;
- }
-*/
- if (!arguments->isDumpIn && arguments->encode != NULL) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
- if (arguments->table_batch <= 0) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
+ if (g_args.table_batch <= 0) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
+ }
- return 0;
+ return 0;
}
-bool isEmptyCommand(char *cmd) {
+/*
+static bool isEmptyCommand(char *cmd) {
char *pchar = cmd;
while (*pchar != '\0') {
@@ -1965,8 +2196,8 @@ bool isEmptyCommand(char *cmd) {
return true;
}
-void taosReplaceCtrlChar(char *str) {
- _Bool ctrlOn = false;
+static void taosReplaceCtrlChar(char *str) {
+ bool ctrlOn = false;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) {
@@ -2008,6 +2239,7 @@ void taosReplaceCtrlChar(char *str) {
*pstr = '\0';
}
+*/
char *ascii_literal_list[] = {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
@@ -2031,374 +2263,421 @@ char *ascii_literal_list[] = {
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
-int converStringToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- while (size > 0) {
- if (*pstr == '\0') break;
- pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
- pstr++;
- size--;
- }
- *pbuf = '\0';
- return 0;
+static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
+ pstr++;
+ size--;
+ }
+ *pbuf = '\0';
+ return 0;
}
-int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- // TODO
- wchar_t wc;
- while (size > 0) {
- if (*pstr == '\0') break;
- int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
- if (byte_width < 0) {
- fprintf(stderr, "mbtowc() return fail.\n");
- exit(-1);
- }
-
- if ((int)wc < 256) {
- pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
- } else {
- memcpy(pbuf, pstr, byte_width);
- pbuf += byte_width;
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ // TODO
+ wchar_t wc;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
+ if (byte_width < 0) {
+ errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
+ exit(-1);
+ }
+
+ if ((int)wc < 256) {
+ pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ } else {
+ memcpy(pbuf, pstr, byte_width);
+ pbuf += byte_width;
+ }
+ pstr += byte_width;
}
- pstr += byte_width;
- }
- *pbuf = '\0';
+ *pbuf = '\0';
- return 0;
+ return 0;
}
-void taosDumpCharset(FILE *fp) {
- char charsetline[256];
+static void taosDumpCharset(FILE *fp) {
+ char charsetline[256];
- (void)fseek(fp, 0, SEEK_SET);
- sprintf(charsetline, "#!%s\n", tsCharset);
- (void)fwrite(charsetline, strlen(charsetline), 1, fp);
+ (void)fseek(fp, 0, SEEK_SET);
+ sprintf(charsetline, "#!%s\n", tsCharset);
+ (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}
-void taosLoadFileCharset(FILE *fp, char *fcharset) {
- char * line = NULL;
- size_t line_size = 0;
+static void taosLoadFileCharset(FILE *fp, char *fcharset) {
+ char * line = NULL;
+ size_t line_size = 0;
- (void)fseek(fp, 0, SEEK_SET);
- ssize_t size = getline(&line, &line_size, fp);
- if (size <= 2) {
- goto _exit_no_charset;
- }
+ (void)fseek(fp, 0, SEEK_SET);
+ ssize_t size = getline(&line, &line_size, fp);
+ if (size <= 2) {
+ goto _exit_no_charset;
+ }
- if (strncmp(line, "#!", 2) != 0) {
- goto _exit_no_charset;
- }
- if (line[size - 1] == '\n') {
- line[size - 1] = '\0';
- size--;
- }
- strcpy(fcharset, line + 2);
+ if (strncmp(line, "#!", 2) != 0) {
+ goto _exit_no_charset;
+ }
+ if (line[size - 1] == '\n') {
+ line[size - 1] = '\0';
+ size--;
+ }
+ strcpy(fcharset, line + 2);
- tfree(line);
- return;
+ tfree(line);
+ return;
_exit_no_charset:
- (void)fseek(fp, 0, SEEK_SET);
- *fcharset = '\0';
- tfree(line);
- return;
+ (void)fseek(fp, 0, SEEK_SET);
+ *fcharset = '\0';
+ tfree(line);
+ return;
}
// ======== dumpIn support multi threads functions ================================//
-static char **tsDumpInSqlFiles = NULL;
-static int32_t tsSqlFileNum = 0;
-static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0};
-static char tsfCharset[64] = {0};
-static int taosGetFilesNum(const char *directoryName, const char *prefix)
+static char **g_tsDumpInSqlFiles = NULL;
+static int32_t g_tsSqlFileNum = 0;
+static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
+static char g_tsCharset[64] = {0};
+
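+// prefix2 is optional; when given (e.g. "avro"), files with either extension are counted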
+static int taosGetFilesNum(const char *directoryName,
+ const char *prefix, const char *prefix2)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
+ char cmd[1024] = { 0 };
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ if (prefix2)
+ sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ",
+ directoryName, prefix, directoryName, prefix2);
+ else
+ sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
- int fileNum = 0;
- if (fscanf(fp, "%d", &fileNum) != 1) {
- fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd);
- exit(0);
- }
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
+ }
- if (fileNum <= 0) {
- fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName);
- exit(0);
- }
+ int fileNum = 0;
+ if (fscanf(fp, "%d", &fileNum) != 1) {
+ errorPrint("failed to execute:%s, parse result error\n", cmd);
+ exit(-1);
+ }
+
+ if (fileNum <= 0) {
+ errorPrint("directory:%s is empry\n", directoryName);
+ exit(-1);
+ }
- pclose(fp);
- return fileNum;
+ pclose(fp);
+ return fileNum;
}
-static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles)
+static void taosParseDirectory(const char *directoryName,
+ const char *prefix, const char *prefix2,
+ char **fileArray, int totalFiles)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ char cmd[1024] = { 0 };
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ if (prefix2) {
+ sprintf(cmd, "ls %s/*.%s %s/*.%s | sort",
+ directoryName, prefix, directoryName, prefix2);
+ } else {
+ sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ }
- int fileNum = 0;
- while (fscanf(fp, "%128s", fileArray[fileNum++])) {
- if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) {
- fileNum--;
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
}
- if (fileNum >= totalFiles) {
- break;
+
+ int fileNum = 0;
+ while (fscanf(fp, "%128s", fileArray[fileNum++])) {
+ if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
+ fileNum--;
+ }
+ if (fileNum >= totalFiles) {
+ break;
+ }
}
- }
- if (fileNum != totalFiles) {
- fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName);
- pclose(fp);
- exit(0);
- }
+ if (fileNum != totalFiles) {
+ errorPrint("directory:%s changed while read\n", directoryName);
+ pclose(fp);
+ exit(-1);
+ }
- pclose(fp);
+ pclose(fp);
}
-static void taosCheckTablesSQLFile(const char *directoryName)
+static void taosCheckDatabasesSQLFile(const char *directoryName)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/dbs.sql", directoryName);
+ char cmd[1024] = { 0 };
+ sprintf(cmd, "ls %s/dbs.sql", directoryName);
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
+ }
- while (fscanf(fp, "%128s", tsDbSqlFile)) {
- break;
- }
+ while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
+ break;
+ }
- pclose(fp);
+ pclose(fp);
}
-static void taosMallocSQLFiles()
+static void taosMallocDumpFiles()
{
- tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*));
- for (int i = 0; i < tsSqlFileNum; i++) {
- tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN);
- }
+ g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
+ for (int i = 0; i < g_tsSqlFileNum; i++) {
+ g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ }
}
-static void taosFreeSQLFiles()
+static void taosFreeDumpFiles()
{
- for (int i = 0; i < tsSqlFileNum; i++) {
- tfree(tsDumpInSqlFiles[i]);
- }
- tfree(tsDumpInSqlFiles);
+ for (int i = 0; i < g_tsSqlFileNum; i++) {
+ tfree(g_tsDumpInSqlFiles[i]);
+ }
+ tfree(g_tsDumpInSqlFiles);
}
static void taosGetDirectoryFileList(char *inputDir)
{
- struct stat fileStat;
- if (stat(inputDir, &fileStat) < 0) {
- fprintf(stderr, "ERROR: %s not exist\n", inputDir);
- exit(0);
- }
-
- if (fileStat.st_mode & S_IFDIR) {
- taosCheckTablesSQLFile(inputDir);
- tsSqlFileNum = taosGetFilesNum(inputDir, "sql");
- int tsSqlFileNumOfTbls = tsSqlFileNum;
- if (tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ struct stat fileStat;
+ if (stat(inputDir, &fileStat) < 0) {
+ errorPrint("%s not exist\n", inputDir);
+ exit(-1);
}
- taosMallocSQLFiles();
- if (0 != tsSqlFileNumOfTbls) {
- taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+
+ if (fileStat.st_mode & S_IFDIR) {
+ taosCheckDatabasesSQLFile(inputDir);
+ if (g_args.avro)
+ g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
+ else
+ g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+
+ int tsSqlFileNumOfTbls = g_tsSqlFileNum;
+ if (g_tsDbSqlFile[0] != 0) {
+ tsSqlFileNumOfTbls--;
+ }
+ taosMallocDumpFiles();
+ if (0 != tsSqlFileNumOfTbls) {
+ if (g_args.avro) {
+ taosParseDirectory(inputDir, "sql", "avro",
+ g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+ } else {
+ taosParseDirectory(inputDir, "sql", NULL,
+ g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+ }
+ }
+ fprintf(stdout, "\nstart to dispose %d files in %s\n",
+ g_tsSqlFileNum, inputDir);
+ } else {
+ errorPrint("%s is not a directory\n", inputDir);
+ exit(-1);
}
- fprintf(stdout, "\nstart to dispose %d files in %s\n", tsSqlFileNum, inputDir);
- }
- else {
- fprintf(stderr, "ERROR: %s is not a directory\n", inputDir);
- exit(0);
- }
}
-static FILE* taosOpenDumpInFile(char *fptr) {
- wordexp_t full_path;
-
- if (wordexp(fptr, &full_path, 0) != 0) {
- fprintf(stderr, "ERROR: illegal file name: %s\n", fptr);
- return NULL;
- }
+static FILE* taosOpenDumpInFile(char *fptr) {
+ wordexp_t full_path;
- char *fname = full_path.we_wordv[0];
+ if (wordexp(fptr, &full_path, 0) != 0) {
+ errorPrint("illegal file name: %s\n", fptr);
+ return NULL;
+ }
- FILE *f = fopen(fname, "r");
- if (f == NULL) {
- fprintf(stderr, "ERROR: failed to open file %s\n", fname);
- wordfree(&full_path);
- return NULL;
- }
+ char *fname = full_path.we_wordv[0];
- wordfree(&full_path);
+ FILE *f = NULL;
+ if ((fname) && (strlen(fname) > 0)) {
+ f = fopen(fname, "r");
+ if (f == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, fname);
+ }
+ }
- return f;
+ wordfree(&full_path);
+ return f;
}
-int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) {
- int read_len = 0;
- char * cmd = NULL;
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
-
- cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
- if (cmd == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
+ char* encode, char* fileName) {
+ int read_len = 0;
+ char * cmd = NULL;
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
+ if (cmd == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- int lastRowsPrint = 5000000;
- int lineNo = 0;
- while ((read_len = getline(&line, &line_len, fp)) != -1) {
- ++lineNo;
- if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
- line[--read_len] = '\0';
+ int lastRowsPrint = 5000000;
+ int lineNo = 0;
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ ++lineNo;
+ if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
+ line[--read_len] = '\0';
- //if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
- continue;
- }
+ //if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ if (read_len == 0 ) {
+ continue;
+ }
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }
- memcpy(cmd + cmd_len, line, read_len);
- cmd[read_len + cmd_len]= '\0';
- if (queryDbImpl(taos, cmd)) {
- fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
- }
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd[read_len + cmd_len]= '\0';
+ if (queryDbImpl(taos, cmd)) {
+ errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n",
+ __func__, __LINE__, lineNo, fileName);
+ fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
+ }
- memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
+ memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
+ cmd_len = 0;
- if (lineNo >= lastRowsPrint) {
- printf(" %d lines already be executed from file %s\n", lineNo, fileName);
- lastRowsPrint += 5000000;
+ if (lineNo >= lastRowsPrint) {
+ printf(" %d lines already be executed from file %s\n", lineNo, fileName);
+ lastRowsPrint += 5000000;
+ }
}
- }
- tfree(cmd);
- tfree(line);
- fclose(fp);
- return 0;
+ tfree(cmd);
+ tfree(line);
+ fclose(fp);
+ return 0;
}
-void* taosDumpInWorkThreadFp(void *arg)
+static void* taosDumpInWorkThreadFp(void *arg)
{
- SThreadParaObj *pThread = (SThreadParaObj*)arg;
- for (int32_t f = 0; f < tsSqlFileNum; ++f) {
- if (f % pThread->totalThreads == pThread->threadIndex) {
- char *SQLFileName = tsDumpInSqlFiles[f];
- FILE* fp = taosOpenDumpInFile(SQLFileName);
- if (NULL == fp) {
- continue;
- }
- fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
- taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
+ SThreadParaObj *pThread = (SThreadParaObj*)arg;
+ for (int32_t f = 0; f < g_tsSqlFileNum; ++f) {
+ if (f % pThread->totalThreads == pThread->threadIndex) {
+ char *SQLFileName = g_tsDumpInSqlFiles[f];
+ FILE* fp = taosOpenDumpInFile(SQLFileName);
+ if (NULL == fp) {
+ continue;
+ }
+ fprintf(stderr, ", Success Open input file: %s\n",
+ SQLFileName);
+ taosDumpInOneFile(pThread->taosCon, fp, g_tsCharset, g_args.encode, SQLFileName);
+ }
}
- }
- return NULL;
+ return NULL;
}
-static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
+static void taosStartDumpInWorkThreads()
{
- pthread_attr_t thattr;
- SThreadParaObj *pThread;
- int32_t totalThreads = args->thread_num;
+ pthread_attr_t thattr;
+ SThreadParaObj *pThread;
+ int32_t totalThreads = g_args.thread_num;
- if (totalThreads > tsSqlFileNum) {
- totalThreads = tsSqlFileNum;
- }
+ if (totalThreads > g_tsSqlFileNum) {
+ totalThreads = g_tsSqlFileNum;
+ }
- SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
- for (int32_t t = 0; t < totalThreads; ++t) {
- pThread = threadObj + t;
- pThread->threadIndex = t;
- pThread->totalThreads = totalThreads;
- pThread->taosCon = taosCon;
+ SThreadParaObj *threadObj = (SThreadParaObj *)calloc(
+ totalThreads, sizeof(SThreadParaObj));
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ if (NULL == threadObj) {
+ errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
+ return;
+ }
- if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) {
- fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex);
- exit(0);
+ for (int32_t t = 0; t < totalThreads; ++t) {
+ pThread = threadObj + t;
+ pThread->threadIndex = t;
+ pThread->totalThreads = totalThreads;
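+ // each dump-in worker opens its own connection instead of sharing the caller's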
+ pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taosCon == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ free(threadObj);
+ return;
+ }
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&(pThread->threadID), &thattr,
+ taosDumpInWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread:%d failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(-1);
+ }
}
- }
- for (int t = 0; t < totalThreads; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
+ for (int t = 0; t < totalThreads; ++t) {
+ pthread_join(threadObj[t].threadID, NULL);
+ }
- for (int t = 0; t < totalThreads; ++t) {
- taos_close(threadObj[t].taosCon);
- }
- free(threadObj);
+ for (int t = 0; t < totalThreads; ++t) {
+ taos_close(threadObj[t].taosCon);
+ }
+ free(threadObj);
}
+static int taosDumpIn() {
+ assert(g_args.isDumpIn);
+
+ TAOS *taos = NULL;
+ FILE *fp = NULL;
-int taosDumpIn(struct arguments *arguments) {
- assert(arguments->isDumpIn);
+ taos = taos_connect(
+ g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("%s() LN%d, failed to connect to TDengine server\n",
+ __func__, __LINE__);
+ return -1;
+ }
- TAOS *taos = NULL;
- FILE *fp = NULL;
+ taosGetDirectoryFileList(g_args.inpath);
- taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port);
- if (taos == NULL) {
- fprintf(stderr, "failed to connect to TDengine server\n");
- return -1;
- }
+ int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
+ if (g_tsDbSqlFile[0] != 0) {
+ tsSqlFileNumOfTbls--;
- taosGetDirectoryFileList(arguments->inpath);
+ fp = taosOpenDumpInFile(g_tsDbSqlFile);
+ if (NULL == fp) {
+ errorPrint("%s() LN%d, failed to open input file %s\n",
+ __func__, __LINE__, g_tsDbSqlFile);
+ return -1;
+ }
+ fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
- int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
- if (tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ taosLoadFileCharset(fp, g_tsCharset);
- fp = taosOpenDumpInFile(tsDbSqlFile);
- if (NULL == fp) {
- fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
- return -1;
+ taosDumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
+ g_tsDbSqlFile);
}
- fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
- taosLoadFileCharset(fp, tsfCharset);
- taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
- }
+ taos_close(taos);
- if (0 != tsSqlFileNumOfTbls) {
- taosStartDumpInWorkThreads(taos, arguments);
- }
+ if (0 != tsSqlFileNumOfTbls) {
+ taosStartDumpInWorkThreads();
+ }
- taos_close(taos);
- taosFreeSQLFiles();
- return 0;
+ taosFreeDumpFiles();
+ return 0;
}
-
diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt
index 2df4708c239515febafc7a4f3ab3f63bd9e434e8..a7fc54d87786f430f913980f089d29d969b01fce 100644
--- a/src/mnode/CMakeLists.txt
+++ b/src/mnode/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index e3feea7d3af4b78cbae5038ecea2e3825ddb370d..5a0743dd1b2ad97350c8b8f2ce57d91254d60eaf 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -16,7 +16,6 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tgrant.h"
-#include "tbn.h"
#include "tglobal.h"
#include "tconfig.h"
#include "tutil.h"
@@ -632,7 +631,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
}
int32_t numOfMnodes = mnodeGetMnodesNum();
- if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() && !pDnode->isMgmt) {
+ if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum()
+ && bnDnodeCanCreateMnode(pDnode)) {
bnNotify();
}
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 20edb02c381b92ac3e04be546f63e41e5d21830e..beeff372aa75a34c4be1857782a76c2426748140 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1035,6 +1035,20 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
return code;
}
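+// UID layout helpers:
+//   super table uid: 40-bit microsecond timestamp << 24 | 16-bit sdb version << 8 | 8-bit random
+//   child table uid: vgId << 48 | 24-bit tid << 24 | 16-bit sdb version << 8 | 8-bit random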
+static uint64_t mnodeCreateSuperTableUid() {
+ int64_t us = taosGetTimestampUs();
+ uint64_t x = (us & ((((uint64_t)1)<<40) - 1));
+ x = x << 24;
+
+ return x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+}
+
+static uint64_t mnodeCreateTableUid(int32_t vgId, int32_t tid) {
+ uint64_t uid = (((uint64_t)vgId) << 48) + ((((uint64_t)tid) & ((1ul << 24) - 1ul)) << 24) +
+ ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ return uid;
+}
+
static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
@@ -1058,19 +1072,16 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
}
- SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
+ SSTableObj *pStable = calloc(1, sizeof(SSTableObj));
if (pStable == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
- int64_t us = taosGetTimestampUs();
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
- int64_t x = (us&0x000000FFFFFFFFFF);
- x = x<<24;
- pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pStable->uid = mnodeCreateSuperTableUid();
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
@@ -1079,7 +1090,8 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
int32_t schemaSize = numOfCols * sizeof(SSchema);
pStable->schema = (SSchema *)calloc(1, schemaSize);
if (pStable->schema == NULL) {
- free(pStable);
+ tfree(pStable->info.tableId);
+ tfree(pStable);
mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
@@ -1096,6 +1108,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (!tIsValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) {
mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
+ tfree(pStable->info.tableId);
+ tfree(pStable->schema);
+ tfree(pStable);
return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG;
}
@@ -2069,16 +2084,13 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
}
pTable->suid = pMsg->pSTable->uid;
- pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) +
- ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid);
pTable->superTable = pMsg->pSTable;
} else {
if (pTable->info.type == TSDB_SUPER_TABLE) {
- int64_t us = taosGetTimestampUs();
- pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateSuperTableUid();
} else {
- pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) +
- ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid);
}
pTable->sversion = 0;
diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt
index 4472c683c70f0e4463ae46c63aaff7c7c7ba0fd6..a64c9d79dd6af511448ad0f9b186f6e50d59c728 100644
--- a/src/os/CMakeLists.txt
+++ b/src/os/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX)
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index 6731ca6d7db9ce72e72a88a1b9dadf76fb8ec87e..903e80d5c7f554d420eafc9224fe5e7e35fe8467 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -29,7 +29,7 @@ extern "C" {
#include "osMath.h"
#include "osMemory.h"
#include "osRand.h"
-#include "osSemphone.h"
+#include "osSemaphore.h"
#include "osSignal.h"
#include "osSleep.h"
#include "osSocket.h"
diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemaphore.h
similarity index 97%
rename from src/os/inc/osSemphone.h
rename to src/os/inc/osSemaphore.h
index fe59095205010bef553413809706c62cd772a7e3..10d14700e013f66e6d98208f0e65fe1ca5fc3874 100644
--- a/src/os/inc/osSemphone.h
+++ b/src/os/inc/osSemaphore.h
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#ifndef TDENGINE_OS_SEMPHONE_H
-#define TDENGINE_OS_SEMPHONE_H
+#ifndef TDENGINE_OS_SEMAPHORE_H
+#define TDENGINE_OS_SEMAPHORE_H
#ifdef __cplusplus
extern "C" {
diff --git a/src/os/src/darwin/CMakeLists.txt b/src/os/src/darwin/CMakeLists.txt
index 259e1a7a0b56a02b7d67825acc85caef5b598089..ed75cac03da112348ff153005d5330786f6386ac 100644
--- a/src/os/src/darwin/CMakeLists.txt
+++ b/src/os/src/darwin/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC)
diff --git a/src/os/src/darwin/dwSemphone.c b/src/os/src/darwin/dwSemaphore.c
similarity index 100%
rename from src/os/src/darwin/dwSemphone.c
rename to src/os/src/darwin/dwSemaphore.c
diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt
index 5c49df24c1c7b1e88c0ba206f2d100fe90ed21c6..ac68cf4cd8cbd217da8aa2d4a41a5aa159562868 100644
--- a/src/os/src/detail/CMakeLists.txt
+++ b/src/os/src/detail/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(.)
diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemaphore.c
similarity index 100%
rename from src/os/src/detail/osSemphone.c
rename to src/os/src/detail/osSemaphore.c
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index 891dccaf9779016065013d4f59580026fb98352a..04b1efe7bf78d57a8806960995b0e34ff79e3abb 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -74,13 +74,14 @@ bool taosGetProcMemory(float *memoryUsedMB) {
return false;
}
+ ssize_t _bytes = 0;
size_t len;
char * line = NULL;
while (!feof(fp)) {
tfree(line);
len = 0;
- getline(&line, &len, fp);
- if (line == NULL) {
+ _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
break;
}
if (strstr(line, "VmRSS:") != NULL) {
@@ -113,8 +114,8 @@ static bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) {
size_t len;
char * line = NULL;
- getline(&line, &len, fp);
- if (line == NULL) {
+ ssize_t _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
uError("read file:%s failed", tsSysCpuFile);
fclose(fp);
return false;
@@ -138,8 +139,8 @@ static bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
size_t len = 0;
char * line = NULL;
- getline(&line, &len, fp);
- if (line == NULL) {
+ ssize_t _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
uError("read file:%s failed", tsProcCpuFile);
fclose(fp);
return false;
@@ -339,6 +340,7 @@ static bool taosGetCardInfo(int64_t *bytes) {
return false;
}
+ ssize_t _bytes = 0;
size_t len = 2048;
char * line = calloc(1, len);
@@ -357,7 +359,12 @@ static bool taosGetCardInfo(int64_t *bytes) {
int64_t nouse6 = 0;
char nouse0[200] = {0};
- getline(&line, &len, fp);
+ _bytes = getline(&line, &len, fp);
+ if (_bytes < 0)
+ {
+ break;
+ }
+
line[len - 1] = 0;
if (strstr(line, "lo:") != NULL) {
@@ -420,6 +427,7 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
return false;
}
+ ssize_t _bytes = 0;
size_t len;
char * line = NULL;
char tmp[10];
@@ -428,8 +436,8 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
while (!feof(fp)) {
tfree(line);
len = 0;
- getline(&line, &len, fp);
- if (line == NULL) {
+ _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
break;
}
if (strstr(line, "rchar:") != NULL) {
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 4d64e14d9415a256b9ef5032db3de35bcfeedb79..847d484d9e28fd11d1df3c3b2905afcf257174dd 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -348,6 +348,7 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
{1.0 / 1000000, 1.0 / 1000, 1.} };
return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
}
+
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt
index 08b696ba1aedb80ecc17997811591fea1209f1ae..d1d95b0096124e122cd630df89ef1057def10373 100644
--- a/src/os/src/linux/CMakeLists.txt
+++ b/src/os/src/linux/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC)
diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c
index 417513314c7013a3e707999bfbd7f9dbd1a4baa8..b7b268b19e6b6f92babb74cfd3f23793be037cd0 100644
--- a/src/os/src/linux/linuxEnv.c
+++ b/src/os/src/linux/linuxEnv.c
@@ -25,6 +25,13 @@ void osInit() {
strcpy(tsDataDir, "/var/lib/power");
strcpy(tsLogDir, "/var/log/power");
strcpy(tsScriptDir, "/etc/power");
+#elif (_TD_TQ_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/tq");
+ }
+ strcpy(tsDataDir, "/var/lib/tq");
+ strcpy(tsLogDir, "/var/log/tq");
+ strcpy(tsScriptDir, "/etc/tq");
#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
diff --git a/src/os/src/windows/CMakeLists.txt b/src/os/src/windows/CMakeLists.txt
index e5472e1abd618f27d119efbb926a959bf8c737c6..83012d6e3e5a2e11655f4a1c0742cdd25cccddf2 100644
--- a/src/os/src/windows/CMakeLists.txt
+++ b/src/os/src/windows/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC)
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index 19351eb7c964a4c2a8a4d1d5d4d1c8ec669908dc..b35cb8f040aec5ff4b4fb12665d0842e72958ba1 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -31,7 +31,14 @@ void osInit() {
strcpy(tsDataDir, "C:/PowerDB/data");
strcpy(tsLogDir, "C:/PowerDB/log");
strcpy(tsScriptDir, "C:/PowerDB/script");
-
+#elif (_TD_TQ_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/TQ/cfg");
+ }
+ strcpy(tsVnodeDir, "C:/TQ/data");
+ strcpy(tsDataDir, "C:/TQ/data");
+ strcpy(tsLogDir, "C:/TQ/log");
+ strcpy(tsScriptDir, "C:/TQ/script");
#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
@@ -48,9 +55,10 @@ void osInit() {
strcpy(tsOsName, "Windows");
const char *tmpDir = getenv("tmp");
- if (tmpDir != NULL) {
+ if (tmpDir == NULL) {
tmpDir = getenv("temp");
}
+
if (tmpDir != NULL) {
strcpy(tsTempDir, tmpDir);
} else {
diff --git a/src/os/src/windows/wSemphone.c b/src/os/src/windows/wSemaphore.c
similarity index 100%
rename from src/os/src/windows/wSemphone.c
rename to src/os/src/windows/wSemaphore.c
diff --git a/src/os/tests/CMakeLists.txt b/src/os/tests/CMakeLists.txt
index b00f0ebdc85a24974fd36228ff69ced16a32b76d..3c477641899994bf34237e93122c3d83f0365fad 100644
--- a/src/os/tests/CMakeLists.txt
+++ b/src/os/tests/CMakeLists.txt
@@ -1,10 +1,11 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
ADD_EXECUTABLE(osTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread)
-ENDIF()
\ No newline at end of file
+ENDIF()
diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt
index 7dcaaf27e615ead75e83630788288a27e938b0a9..320445f7f784884f8aa009e37182fc57a38bb96f 100644
--- a/src/plugins/CMakeLists.txt
+++ b/src/plugins/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
ADD_SUBDIRECTORY(monitor)
diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt
index bfb47ad12e8b1ef7099109ecf5849ec3575caf5f..57fc2ee3a2692c239d7fa36d6e55ddae738a2720 100644
--- a/src/plugins/http/CMakeLists.txt
+++ b/src/plugins/http/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c
index 397791706d0fc24f250c2332dddc5b0b031a4817..f33a994465a94bad5d79df8af73ff4fd9d640516 100644
--- a/src/plugins/http/src/httpGcJson.c
+++ b/src/plugins/http/src/httpGcJson.c
@@ -228,13 +228,11 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
case TSDB_DATA_TYPE_NCHAR:
httpJsonStringForTransMean(jsonBuf, (char *)row[i], fields[i].bytes);
break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (precision == TSDB_TIME_PRECISION_MILLI) { // ms
- httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
- } else {
- httpJsonInt64(jsonBuf, *((int64_t *)row[i]) / 1000);
- }
+ case TSDB_DATA_TYPE_TIMESTAMP: {
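+ // convert the row timestamp from the table's native precision to milliseconds for JSON output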
+ int64_t ts = convertTimePrecision(*((int64_t *)row[i]), precision, TSDB_TIME_PRECISION_MILLI);
+ httpJsonInt64(jsonBuf, ts);
break;
+ }
default:
httpJsonString(jsonBuf, "-", 1);
break;
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index df4c4fbc63d469c22ea13273564bec49562f35a9..10300e93670b5e10f56259d51b6ca31df3e90e39 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -297,6 +297,7 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
}
default:
+ fractionLen = 0;
assert(false);
}
@@ -342,6 +343,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
}
default:
+ fractionLen = 0;
assert(false);
}
diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt
index 28c62a099c0f2bea8b33a57c577bc89c7fb15aaa..8a05d63e141facfe34e740887384fec0337534d4 100644
--- a/src/plugins/monitor/CMakeLists.txt
+++ b/src/plugins/monitor/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt
index 50b0bbe8af4faeab41a7b041d6aa51747f0aab3e..081512138505ab7e7a54a8bbe770aa293adec0be 100644
--- a/src/plugins/mqtt/CMakeLists.txt
+++ b/src/plugins/mqtt/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt
index f23ac7dd86932ba42dde7c2891865f7dff546a00..d2411d47b52bac65e86852ef06116066e9b18774 100644
--- a/src/query/CMakeLists.txt
+++ b/src/query/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 08355cf4aef3b9000b573cd114fcc6798142e55c..d299976955e777b9d1eee9518ec5060f2bfcb54b 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -73,14 +73,14 @@ typedef struct SResultRowPool {
typedef struct SResultRow {
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
- int32_t offset:29; // row index in buffer page
+ int32_t offset:29; // row index in buffer page
bool startInterp; // the time window start timestamp has done the interpolation already.
bool endInterp; // the time window end timestamp has done the interpolation already.
bool closed; // this result status: closed or opened
uint32_t numOfRows; // number of rows of current time window
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
- STimeWindow win;
- char* key; // start key of current result row
+ STimeWindow win;
+ char *key; // start key of current result row
} SResultRow;
typedef struct SGroupResInfo {
@@ -105,8 +105,7 @@ typedef struct SResultRowInfo {
int16_t type:8; // data type for hash key
int32_t size:24; // number of result set
int32_t capacity; // max capacity
- int32_t curIndex; // current start active index
- int64_t prevSKey; // previous (not completed) sliding window start key
+ int32_t curPos; // current active result row index of pResult list
} SResultRowInfo;
typedef struct SColumnFilterElem {
@@ -118,6 +117,7 @@ typedef struct SColumnFilterElem {
typedef struct SSingleColumnFilterInfo {
void* pData;
+ void* pData2; //used for nchar column
int32_t numOfFilters;
SColumnInfo info;
SColumnFilterElem* pFilters;
@@ -133,6 +133,28 @@ typedef struct STableQueryInfo {
SResultRowInfo resInfo;
} STableQueryInfo;
+typedef enum {
+ QUERY_PROF_BEFORE_OPERATOR_EXEC = 0,
+ QUERY_PROF_AFTER_OPERATOR_EXEC,
+ QUERY_PROF_QUERY_ABORT
+} EQueryProfEventType;
+
+typedef struct {
+ EQueryProfEventType eventType;
+ int64_t eventTime;
+
+ union {
+ uint8_t operatorType; //for operator event
+ int32_t abortCode; //for query abort event
+ };
+} SQueryProfEvent;
+
+typedef struct {
+ uint8_t operatorType;
+ int64_t sumSelfTime;
+ int64_t sumRunTimes;
+} SOperatorProfResult;
+
typedef struct SQueryCostInfo {
uint64_t loadStatisTime;
uint64_t loadFileBlockTime;
@@ -154,6 +176,9 @@ typedef struct SQueryCostInfo {
uint64_t tableInfoSize;
uint64_t hashSize;
uint64_t numOfTimeWindows;
+
+ SArray* queryProfEvents; //SArray<SQueryProfEvent>
+ SHashObj* operatorProfResults; //map<operator_type, SOperatorProfResult>
} SQueryCostInfo;
typedef struct {
@@ -192,6 +217,7 @@ typedef struct SQueryAttr {
bool needReverseScan; // need reverse scan
bool distinctTag; // distinct tag query
bool stateWindow; // window State on sub/normal table
+ bool createFilterOperator; // if filter operator is needed
int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number
@@ -252,6 +278,7 @@ typedef struct SQueryRuntimeEnv {
bool enableGroupData;
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SHashObj* pResultRowHashTable; // quick locate the window object for each result
+ SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not
char* keyBuf; // window key buffer
SResultRowPool* pool; // window result object pool
char** prevRow;
@@ -287,7 +314,7 @@ enum OPERATOR_TYPE_E {
OP_TagScan = 4,
OP_TableBlockInfoScan= 5,
OP_Aggregate = 6,
- OP_Arithmetic = 7,
+ OP_Project = 7,
OP_Groupby = 8,
OP_Limit = 9,
OP_SLimit = 10,
@@ -401,7 +428,7 @@ typedef struct STagScanInfo {
SColumnInfo* pCols;
SSDataBlock* pRes;
int32_t totalTables;
- int32_t currentIndex;
+ int32_t curPos;
} STagScanInfo;
typedef struct SOptrBasicInfo {
@@ -418,13 +445,13 @@ typedef struct SAggOperatorInfo {
uint32_t seed;
} SAggOperatorInfo;
-typedef struct SArithOperatorInfo {
+typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
int32_t bufCapacity;
uint32_t seed;
SSDataBlock *existDataBlock;
-} SArithOperatorInfo;
+} SProjectOperatorInfo;
typedef struct SLimitOperatorInfo {
int64_t limit;
@@ -518,7 +545,7 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv*
SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
-SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
@@ -551,6 +578,7 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows);
void* destroyOutputBuf(SSDataBlock* pBlock);
+void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order);
int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput);
@@ -594,7 +622,12 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
void setQueryKilled(SQInfo *pQInfo);
+
+void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType);
+void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code);
+void calculateOperatorProfResults(SQInfo* pQInfo);
void queryCostStatis(SQInfo *pQInfo);
+
void freeQInfo(SQInfo *pQInfo);
void freeQueryAttr(SQueryAttr *pQuery);
diff --git a/src/query/inc/qFilter.h b/src/query/inc/qFilter.h
index 1a618c62fa8bd4940eba289d052cb1562e7481c0..73f86f3ff3fda95d6707a5d4e05e6255e92a5af2 100644
--- a/src/query/inc/qFilter.h
+++ b/src/query/inc/qFilter.h
@@ -221,6 +221,10 @@ extern int32_t filterGetMergeRangeNum(void* h, int32_t* num);
extern int32_t filterGetMergeRangeRes(void* h, SFilterRange *ra);
extern int32_t filterFreeMergeRange(void* h);
extern int32_t filterGetTimeRange(SFilterInfo *info, STimeWindow *win);
+extern int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar);
+extern int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo);
+extern void filterFreeInfo(SFilterInfo *info);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index c6f667475ee7b061fcbc86be7a2aa8fac7282e8e..f9a9992b81aa30b15b042a96af343c9e943bd20b 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -237,18 +237,20 @@ typedef struct tSqlExpr {
uint16_t type; // sql node type
uint32_t tokenId; // TK_LE: less than(binary expr)
- // the whole string of the function(col, param), while the function name is kept in token
- SStrToken operand;
- uint32_t functionId; // function id
+ // the whole string of the function(col, param), while the function name is kept in exprToken
+ struct {
+ SStrToken operand;
+ struct SArray *paramList; // function parameters list
+ } Expr;
- SStrToken colInfo; // table column info
+ uint32_t functionId; // function id, todo remove it
+ SStrToken columnName; // table column info
tVariant value; // the use input value
- SStrToken token; // original sql expr string
- uint32_t flags;
+ SStrToken exprToken; // original sql expr string
+ uint32_t flags; // todo remove it
struct tSqlExpr *pLeft; // left child
struct tSqlExpr *pRight; // right child
- struct SArray *pParam; // function parameters list
} tSqlExpr;
// used in select clause. select from xxx
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index 0cac15875b3ddeb723d8b594555e4ac5cb0b9626..c2182c21325a6724d9535b5ae61af11e681894de 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -135,7 +135,6 @@ typedef struct SQueryInfo {
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
- int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index 0756e4178598f09ecdc784b07e226b9054edcfef..c8741030c06ac0e9463de85c2e1f358ed24fa649 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -24,7 +24,18 @@
memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
} while (0)
+#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \
+ do { \
+ assert(sizeof(_uid) == sizeof(uint64_t)); \
+ *(void **)(_k) = (_buf); \
+ *(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \
+ memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \
+ } while (0)
+
+
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
+#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)
+
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
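
The new SET_RES_EXT_WINDOW_KEY / GET_RES_EXT_WINDOW_KEY_LEN pair prepends the owning result-row-info pointer to the existing [uid][raw key] layout. A minimal standalone sketch of that key layout, mirroring the macro's raw stores with purely illustrative names and values (this is not TDengine code):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void) {
    /* layout implied by SET_RES_EXT_WINDOW_KEY:
       [SResultRowInfo* buf][uint64_t uid][raw window key bytes] */
    int64_t  winSKey = 1609459200000;   /* example window start key        */
    uint64_t uid     = 42;              /* example table/group id          */
    void    *buf     = &winSKey;        /* stand-in for the pResultRowInfo */

    char key[sizeof(void *) + sizeof(uint64_t) + sizeof(winSKey)];
    *(void **)key = buf;
    *(uint64_t *)(key + sizeof(void *)) = uid;
    memcpy(key + sizeof(void *) + sizeof(uint64_t), &winSKey, sizeof(winSKey));

    printf("ext key length: %zu bytes\n", sizeof(key));
    return 0;
}
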
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index ce2c3e361654ebe67310e6b71edc4ea6506d64ed..abd7d06b6080f6361cb1504b8d86c62f11ee0bac 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -303,11 +303,13 @@ alter_db_optr(Y) ::= alter_db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol
alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); }
-alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); }
-alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); }
alter_db_optr(Y) ::= alter_db_optr(Z) cachelast(X). { Y = Z; Y.cachelast = strtol(X.z, NULL, 10); }
+// dynamically updating the following two parameters is not allowed.
+//alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); }
+//alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } not supported yet
+
%type alter_topic_optr {SCreateDbInfo}
alter_topic_optr(Y) ::= alter_db_optr(Z). { Y = Z; Y.dbType = TSDB_DB_TYPE_TOPIC; }
@@ -681,7 +683,7 @@ where_opt(A) ::= WHERE expr(X). {A = X;}
%type expr {tSqlExpr*}
%destructor expr {tSqlExprDestroy($$);}
-expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->token.z = X.z; A->token.n = (Z.z - X.z + 1);}
+expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->exprToken.z = X.z; A->exprToken.n = (Z.z - X.z + 1);}
expr(A) ::= ID(X). { A = tSqlExprCreateIdValue(&X, TK_ID);}
expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(&X, TK_ID);}
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index 8efc4aad4c82ce34047fb87e10db169a1d8e5e3f..212b65fb3db90dbe84cfc174396412e9689da55b 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -74,7 +74,6 @@
} while (0);
void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
-void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {}
void doFinalizer(SQLFunctionCtx *pCtx) { RESET_RESULT_INFO(GET_RES_INFO(pCtx)); }
@@ -1576,7 +1575,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
memcpy(pCtx->pOutput, data, pCtx->inputBytes);
- TSKEY ts = GET_TS_DATA(pCtx, i);
+ TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->hasResult = DATA_SET_FLAG;
@@ -1591,7 +1590,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
continue;
}
- TSKEY ts = GET_TS_DATA(pCtx, i);
+ TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
@@ -1759,6 +1758,49 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
memcpy((dst)->pTags, (src)->pTags, (size_t)(__l)); \
} while (0)
+static int32_t topBotComparFn(const void *p1, const void *p2, const void *param)
+{
+ uint16_t type = *(uint16_t *) param;
+ tValuePair *val1 = *(tValuePair **) p1;
+ tValuePair *val2 = *(tValuePair **) p2;
+
+ if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ if (val1->v.i64 == val2->v.i64) {
+ return 0;
+ }
+
+ return (val1->v.i64 > val2->v.i64) ? 1 : -1;
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
+ if (val1->v.u64 == val2->v.u64) {
+ return 0;
+ }
+
+ return (val1->v.u64 > val2->v.u64) ? 1 : -1;
+ }
+
+ if (val1->v.dKey == val2->v.dKey) {
+ return 0;
+ }
+
+ return (val1->v.dKey > val2->v.dKey) ? 1 : -1;
+}
+
+static void topBotSwapFn(void *dst, void *src, const void *param)
+{
+ char tag[32768];
+ tValuePair temp;
+ uint16_t tagLen = *(uint16_t *) param;
+ tValuePair *vdst = *(tValuePair **) dst;
+ tValuePair *vsrc = *(tValuePair **) src;
+
+ memset(tag, 0, sizeof(tag));
+ temp.pTags = tag;
+
+ VALUEPAIRASSIGN(&temp, vdst, tagLen);
+ VALUEPAIRASSIGN(vdst, vsrc, tagLen);
+ VALUEPAIRASSIGN(vsrc, &temp, tagLen);
+}
+
static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type,
SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) {
tVariant val = {0};
@@ -1766,61 +1808,19 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
tValuePair **pList = pInfo->res;
assert(pList != NULL);
-
+
if (pInfo->num < maxLen) {
- if (pInfo->num == 0 ||
- (IS_SIGNED_NUMERIC_TYPE(type) && val.i64 >= pList[pInfo->num - 1]->v.i64) ||
- (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 >= pList[pInfo->num - 1]->v.u64) ||
- (IS_FLOAT_TYPE(type) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) {
- valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
- } else {
- int32_t i = pInfo->num - 1;
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
- while (i >= 0 && pList[i]->v.i64 > val.i64) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- while (i >= 0 && pList[i]->v.u64 > val.u64) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- } else {
- while (i >= 0 && pList[i]->v.dKey > val.dKey) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- }
-
- valuePairAssign(pList[i + 1], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage);
- }
-
+ valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+
+ taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0);
+
pInfo->num++;
} else {
- int32_t i = 0;
-
if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 > pList[0]->v.i64) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 > pList[0]->v.u64) ||
(IS_FLOAT_TYPE(type) && val.dKey > pList[0]->v.dKey)) {
- // find the appropriate the slot position
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
- while (i + 1 < maxLen && pList[i + 1]->v.i64 < val.i64) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- } if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- while (i + 1 < maxLen && pList[i + 1]->v.u64 < val.u64) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- } else {
- while (i + 1 < maxLen && pList[i + 1]->v.dKey < val.dKey) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- }
-
- valuePairAssign(pList[i], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+ valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+ taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0);
}
}
}
@@ -1834,57 +1834,17 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa
assert(pList != NULL);
if (pInfo->num < maxLen) {
- if (pInfo->num == 0) {
- valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage);
- } else {
- int32_t i = pInfo->num - 1;
-
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
- while (i >= 0 && pList[i]->v.i64 < val.i64) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- while (i >= 0 && pList[i]->v.u64 < val.u64) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- } else {
- while (i >= 0 && pList[i]->v.dKey < val.dKey) {
- VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen);
- i -= 1;
- }
- }
-
- valuePairAssign(pList[i + 1], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
- }
-
+ valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+
+ taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1);
+
pInfo->num++;
} else {
- int32_t i = 0;
-
if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 < pList[0]->v.i64) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 < pList[0]->v.u64) ||
(IS_FLOAT_TYPE(type) && val.dKey < pList[0]->v.dKey)) {
- // find the appropriate the slot position
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
- while (i + 1 < maxLen && pList[i + 1]->v.i64 > val.i64) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- } if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- while (i + 1 < maxLen && pList[i + 1]->v.u64 > val.u64) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- } else {
- while (i + 1 < maxLen && pList[i + 1]->v.dKey > val.dKey) {
- VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen);
- i += 1;
- }
- }
-
- valuePairAssign(pList[i], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage);
+ valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage);
+ taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1);
}
}
}
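
The two hunks above replace the O(K) insertion-shift loops in do_top_function_add/do_bottom_function_add with heap maintenance via taosheapsort/taosheapadjust: the K retained values are kept as a heap whose root is the value most likely to be evicted, and a candidate only enters a full heap when it beats that root. A self-contained sketch of the top-K (largest values) direction, using plain int64_t instead of tValuePair and hand-rolled sifting instead of the taosheap* helpers (all names here are illustrative, not TDengine's):

#include <stdint.h>

/* Min-heap of the K largest values seen so far (sketch, not TDengine code). */
static void siftDown(int64_t *heap, int len, int i) {
    for (;;) {
        int smallest = i, l = 2 * i + 1, r = 2 * i + 2;
        if (l < len && heap[l] < heap[smallest]) smallest = l;
        if (r < len && heap[r] < heap[smallest]) smallest = r;
        if (smallest == i) break;
        int64_t t = heap[i]; heap[i] = heap[smallest]; heap[smallest] = t;
        i = smallest;
    }
}

static void topKAdd(int64_t *heap, int *num, int maxLen, int64_t v) {
    if (*num < maxLen) {                 /* heap not full: push and sift up */
        int i = (*num)++;
        heap[i] = v;
        while (i > 0 && heap[(i - 1) / 2] > heap[i]) {
            int64_t t = heap[i]; heap[i] = heap[(i - 1) / 2]; heap[(i - 1) / 2] = t;
            i = (i - 1) / 2;
        }
    } else if (v > heap[0]) {            /* beats the current minimum: replace the root */
        heap[0] = v;
        siftDown(heap, maxLen, 0);
    }
}

The bottom-K case is symmetric: flip the comparisons (a max-heap of the K smallest values), which is what the trailing 0/1 argument to the taosheapsort/taosheapadjust calls in the hunks appears to select.
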
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index f6a5d222877c266a33e9b4f26a06407f7040143b..01f43778f9bb09cfd896a3532524b9e88e90f02b 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -16,6 +16,7 @@
#include "qFill.h"
#include "taosmsg.h"
#include "tglobal.h"
+#include "talgo.h"
#include "exception.h"
#include "hash.h"
@@ -26,6 +27,7 @@
#include "queryLog.h"
#include "tlosertree.h"
#include "ttype.h"
+#include "tcompare.h"
#include "tscompression.h"
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
@@ -177,14 +179,13 @@ static STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* w
static STableIdInfo createTableIdInfo(STableQueryInfo* pTableQueryInfo);
static void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInfo* pDownstream);
-static void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyArithOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
@@ -207,7 +208,70 @@ static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowIn
SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput,
int32_t groupIndex);
-// setup the output buffer for each operator
+SArray* getOrderCheckColumns(SQueryAttr* pQuery);
+
+
+typedef struct SRowCompSupporter {
+ SQueryRuntimeEnv *pRuntimeEnv;
+ int16_t dataOffset;
+ __compar_fn_t comFunc;
+} SRowCompSupporter;
+
+static int compareRowData(const void *a, const void *b, const void *userData) {
+ const SResultRow *pRow1 = (const SResultRow *)a;
+ const SResultRow *pRow2 = (const SResultRow *)b;
+
+ SRowCompSupporter *supporter = (SRowCompSupporter *)userData;
+ SQueryRuntimeEnv* pRuntimeEnv = supporter->pRuntimeEnv;
+
+ tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pRow1->pageId);
+ tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pRow2->pageId);
+
+ int16_t offset = supporter->dataOffset;
+ char *in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset);
+ char *in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset);
+
+ return (in1 != NULL && in2 != NULL) ? supporter->comFunc(in1, in2) : 0;
+}
+
+static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, SSDataBlock* pDataBlock) {
+ SArray *columnOrderList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
+ size_t size = taosArrayGetSize(columnOrderList);
+ taosArrayDestroy(columnOrderList);
+
+ if (size <= 0) {
+ return;
+ }
+
+ int32_t orderId = pRuntimeEnv->pQueryAttr->order.orderColId;
+ if (orderId <= 0) {
+ return;
+ }
+
+ bool found = false;
+ int16_t dataOffset = 0;
+
+ for (int32_t j = 0; j < pDataBlock->info.numOfCols; ++j) {
+ SColumnInfoData* pColInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock->pDataBlock, j);
+ if (orderId == j) {
+ found = true;
+ break;
+ }
+
+ dataOffset += pColInfoData->info.bytes;
+ }
+
+ if (found == false) {
+ return;
+ }
+
+ int16_t type = pRuntimeEnv->pQueryAttr->pExpr1[orderId].base.resType;
+
+ SRowCompSupporter support = {.pRuntimeEnv = pRuntimeEnv, .dataOffset = dataOffset, .comFunc = getComparFunc(type, 0)};
+ taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support);
+}
+
+//setup the output buffer for each operator
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) {
const static int32_t minSize = 8;
@@ -347,10 +411,10 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim
pResultRowInfo->capacity = (int32_t)newCapacity;
}
-static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData,
- int16_t bytes, bool masterscan, uint64_t uid) {
+static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, int64_t tid,
+ char* pData, int16_t bytes, bool masterscan, uint64_t tableGroupId) {
bool existed = false;
- SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid);
+ SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tableGroupId);
SResultRow **p1 =
(SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
@@ -362,16 +426,26 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
}
if (p1 != NULL) {
- for(int32_t i = pResultRowInfo->size - 1; i >= 0; --i) {
- if (pResultRowInfo->pResult[i] == (*p1)) {
- pResultRowInfo->curIndex = i;
+ if (pResultRowInfo->size == 0) {
+ existed = false;
+ assert(pResultRowInfo->curPos == -1);
+ } else if (pResultRowInfo->size == 1) {
+ existed = (pResultRowInfo->pResult[0] == (*p1));
+ pResultRowInfo->curPos = 0;
+ } else { // check if the current pResultRowInfo already contains this pResultRow
+ SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+ int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
+ if (index != NULL) {
+ pResultRowInfo->curPos = (int32_t) *index;
existed = true;
- break;
+ } else {
+ existed = false;
}
}
}
} else {
- if (p1 != NULL) { // group by column query
+ // In case of a group by column query, the required SResultRow object must already exist in the pResultRowInfo object.
+ if (p1 != NULL) {
return *p1;
}
}
@@ -393,8 +467,12 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
pResult = *p1;
}
- pResultRowInfo->pResult[pResultRowInfo->size] = pResult;
- pResultRowInfo->curIndex = pResultRowInfo->size++;
+ pResultRowInfo->curPos = pResultRowInfo->size;
+ pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
+
+ int64_t index = pResultRowInfo->curPos;
+ SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+ taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
}
// too many time window in query
@@ -402,7 +480,7 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
- return getResultRow(pResultRowInfo, pResultRowInfo->curIndex);
+ return pResultRowInfo->pResult[pResultRowInfo->curPos];
}
static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) {
@@ -433,13 +511,8 @@ static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWin
static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) {
STimeWindow w = {0};
- if (pResultRowInfo->curIndex == -1) { // the first window, from the previous stored value
- if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) {
- getInitialStartTimeWindow(pQueryAttr, ts, &w);
- pResultRowInfo->prevSKey = w.skey;
- } else {
- w.skey = pResultRowInfo->prevSKey;
- }
+ if (pResultRowInfo->curPos == -1) { // the first window, from the previous stored value
+ getInitialStartTimeWindow(pQueryAttr, ts, &w);
if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') {
w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1;
@@ -447,9 +520,7 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
w.ekey = w.skey + pQueryAttr->interval.interval - 1;
}
} else {
- int32_t slot = curTimeWindowIndex(pResultRowInfo);
- SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot);
- w = pWindowRes->win;
+ w = getResultRow(pResultRowInfo, pResultRowInfo->curPos)->win;
}
if (w.skey > ts || w.ekey < ts) {
@@ -529,13 +600,13 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf
return 0;
}
-static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win,
- bool masterscan, SResultRow **pResult, int64_t groupId, SQLFunctionCtx* pCtx,
+static int32_t setResultOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, STimeWindow *win,
+ bool masterscan, SResultRow **pResult, int64_t tableGroupId, SQLFunctionCtx* pCtx,
int32_t numOfOutput, int32_t* rowCellInfoOffset) {
assert(win->skey <= win->ekey);
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
- SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, groupId);
+ SResultRow *pResultRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&win->skey, TSDB_KEYSIZE, masterscan, tableGroupId);
if (pResultRow == NULL) {
*pResult = NULL;
return TSDB_CODE_SUCCESS;
@@ -543,7 +614,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRow
// not assign result buffer yet, add new result buffer
if (pResultRow->pageId == -1) {
- int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) groupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize);
+ int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) tableGroupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize);
if (ret != TSDB_CODE_SUCCESS) {
return -1;
}
@@ -634,7 +705,13 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
// all result rows are closed, set the last one to be the skey
if (skey == TSKEY_INITIAL_VAL) {
- pResultRowInfo->curIndex = pResultRowInfo->size - 1;
+ if (pResultRowInfo->size == 0) {
+// assert(pResultRowInfo->current == NULL);
+ assert(pResultRowInfo->curPos == -1);
+ pResultRowInfo->curPos = -1;
+ } else {
+ pResultRowInfo->curPos = pResultRowInfo->size - 1;
+ }
} else {
for (i = pResultRowInfo->size - 1; i >= 0; --i) {
@@ -645,12 +722,10 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
}
if (i == pResultRowInfo->size - 1) {
- pResultRowInfo->curIndex = i;
+ pResultRowInfo->curPos = i;
} else {
- pResultRowInfo->curIndex = i + 1; // current not closed result object
+ pResultRowInfo->curPos = i + 1; // current not closed result object
}
-
- pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey;
}
}
@@ -658,7 +733,7 @@ static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQuer
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) {
closeAllResultRows(pResultRowInfo);
- pResultRowInfo->curIndex = pResultRowInfo->size - 1;
+ pResultRowInfo->curPos = pResultRowInfo->size - 1;
} else {
int32_t step = ascQuery ? 1 : -1;
doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo);
@@ -912,7 +987,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
}
} else {
- if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) {
+ if (pBlock->pDataBlock != NULL) {
doSetInputDataBlock(pOperator, pCtx, pBlock, order);
} else {
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
@@ -978,7 +1053,7 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
}
}
-static void arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) {
+static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
for (int32_t k = 0; k < numOfOutput; ++k) {
@@ -1157,7 +1232,7 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
}
}
-static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t groupId) {
+static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) {
STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info;
SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv;
@@ -1167,7 +1242,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
- int32_t prevIndex = curTimeWindowIndex(pResultRowInfo);
+ int32_t prevIndex = pResultRowInfo->curPos;
TSKEY* tsCols = NULL;
if (pSDataBlock->pDataBlock != NULL) {
@@ -1184,7 +1259,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
SResultRow* pResult = NULL;
- int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx,
+ int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
numOfOutput, pInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -1196,36 +1271,35 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true);
// prev time window not interpolated yet.
- int32_t curIndex = curTimeWindowIndex(pResultRowInfo);
+ int32_t curIndex = pResultRowInfo->curPos;
if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) {
for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
- SResultRow* pRes = pResultRowInfo->pResult[j];
+ SResultRow* pRes = getResultRow(pResultRowInfo, j);
if (pRes->closed) {
- assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) &&
- resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
+ assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
continue;
}
- STimeWindow w = pRes->win;
- ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &w, masterScan, &pResult, groupId, pInfo->pCtx,
- numOfOutput, pInfo->rowCellInfoOffset);
- if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
+ STimeWindow w = pRes->win;
+ ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult,
+ tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
+ if (ret != TSDB_CODE_SUCCESS) {
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
- assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
+ assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
- doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY *)pRuntimeEnv->prevRow[0],
- -1, tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP);
+ doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1,
+ tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP);
- setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
- setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP);
+ setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
+ setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP);
- doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput);
- }
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput);
+ }
// restore current time window
- ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx,
+ ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
numOfOutput, pInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -1245,7 +1319,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
}
// null data, failed to allocate more memory buffer
- int32_t code = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &nextWin, masterScan, &pResult, groupId,
+ int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &nextWin, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -1282,11 +1356,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
return;
}
- int64_t* tsList = NULL;
SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0);
- if (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- tsList = (int64_t*) pFirstColData->pData;
- }
+ int64_t* tsList = (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (int64_t*) pFirstColData->pData:NULL;
STimeWindow w = TSWINDOW_INITIALIZER;
@@ -1319,12 +1390,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
}
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
- setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData,
- bytes);
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, bytes);
}
- int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes,
- item->groupIndex);
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
@@ -1340,18 +1409,18 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
memcpy(pInfo->prevData, val, bytes);
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
- setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val,
- bytes);
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
}
- int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes,
- item->groupIndex);
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
}
+
+ tfree(pInfo->prevData);
}
static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
@@ -1391,7 +1460,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
- int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
+ int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@@ -1412,7 +1481,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
- int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
+ int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@@ -1455,7 +1524,8 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
len = varDataLen(pData);
}
- SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, d, len, true, groupIndex);
+ int64_t tid = 0;
+ SResultRow *pResultRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, d, len, true, groupIndex);
assert (pResultRow != NULL);
setResultRowKey(pResultRow, pData, type);
@@ -1685,8 +1755,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
}
for(int32_t i = 1; i < numOfOutput; ++i) {
- (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) +
- pExpr[i - 1].base.interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+ (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + pExpr[i - 1].base.interBytes);
}
setCtxTagColumnInfo(pFuncCtx, numOfOutput);
@@ -1720,11 +1789,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pQueryAttr = pQueryAttr;
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
+ pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
+
pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen);
- pRuntimeEnv->currentOffset = pQueryAttr->limit.offset;
// NOTE: pTableCheckInfo need to update the query time range and the lastKey info
pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
@@ -1743,7 +1813,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQueryAttr->tableCols[i-1].bytes;
}
- *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN;
+ if (pQueryAttr->tableCols[0].type == TSDB_DATA_TYPE_TIMESTAMP) {
+ *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN;
+ }
}
qDebug("QInfo:0x%"PRIx64" init runtime environment completed", GET_QID(pRuntimeEnv));
@@ -1777,7 +1849,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_Groupby: {
pRuntimeEnv->proot =
createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
- setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+
+ int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+ if (opType != OP_DummyInput) {
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ }
break;
}
case OP_SessionWindow: {
@@ -1806,17 +1882,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
break;
}
- case OP_Arithmetic: { // TODO refactor to remove arith operator.
+ case OP_Project: { // TODO refactor to remove arith operator.
SOperatorInfo* prev = pRuntimeEnv->proot;
if (i == 0) {
- pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+ pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
if (pRuntimeEnv->proot != NULL && prev->operatorType != OP_DummyInput && prev->operatorType != OP_Join) { // TODO refactor
setTableScanFilterOperatorInfo(prev->info, pRuntimeEnv->proot);
}
} else {
prev = pRuntimeEnv->proot;
assert(pQueryAttr->pExpr2 != NULL);
- pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2);
+ pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2);
}
break;
}
@@ -1963,6 +2039,9 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap);
pRuntimeEnv->pTableRetrieveTsMap = NULL;
+ taosHashCleanup(pRuntimeEnv->pResultRowListSet);
+ pRuntimeEnv->pResultRowListSet = NULL;
+
destroyOperatorInfo(pRuntimeEnv->proot);
pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
@@ -2671,10 +2750,6 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData
}
void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock) {
- if (numOfFilterCols > 0 && pFilterInfo[0].pData != NULL) {
- return;
- }
-
// set the initial static data value filter expression
for (int32_t i = 0; i < numOfFilterCols; ++i) {
for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) {
@@ -2749,7 +2824,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
- if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+ if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -2795,7 +2870,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
- if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+ if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -3139,7 +3214,7 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl
}
-static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQueryInfo *pTableQueryInfo) {
+static void updateTableQueryInfoForReverseScan(STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
}
@@ -3151,7 +3226,12 @@ static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQue
pTableQueryInfo->cur.vgroupIndex = -1;
// set the index to be the end slot of result rows array
- pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1;
+ SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo;
+ if (pResultRowInfo->size > 0) {
+ pResultRowInfo->curPos = pResultRowInfo->size - 1;
+ } else {
+ pResultRowInfo->curPos = -1;
+ }
}
static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) {
@@ -3165,7 +3245,7 @@ static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) {
size_t t = taosArrayGetSize(group);
for (int32_t j = 0; j < t; ++j) {
STableQueryInfo *pCheckInfo = taosArrayGetP(group, j);
- updateTableQueryInfoForReverseScan(pQueryAttr, pCheckInfo);
+ updateTableQueryInfoForReverseScan(pCheckInfo);
// update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide
// the start check timestamp of tsdbQueryHandle
@@ -3204,8 +3284,8 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset;
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
- int32_t tid = 0;
- SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&tid, sizeof(tid), true, uid);
+ int64_t tid = 0;
+ SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
SColumnInfoData* pData = taosArrayGet(pDataBlock->pDataBlock, i);
@@ -3436,10 +3516,13 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
}
void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx,
- int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex) {
+ int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t tableGroupId) {
+ // for simple group by query without interval, all the tables belong to one group result.
int64_t uid = 0;
+ int64_t tid = 0;
+
SResultRow* pResultRow =
- doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char*)&groupIndex, sizeof(groupIndex), true, uid);
+ doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char*)&tableGroupId, sizeof(tableGroupId), true, uid);
assert (pResultRow != NULL);
/*
@@ -3447,7 +3530,7 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe
* all group belong to one result set, and each group result has different group id so set the id to be one
*/
if (pResultRow->pageId == -1) {
- int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize);
+ int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, tableGroupId, pRuntimeEnv->pQueryAttr->resultRowSize);
if (ret != TSDB_CODE_SUCCESS) {
return;
}
@@ -3456,20 +3539,20 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe
setResultRowOutputBufInitCtx(pRuntimeEnv, pResultRow, pCtx, numOfOutput, rowCellInfoOffset);
}
-void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t groupIndex,
+void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t tableGroupId,
TSKEY nextKey) {
STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current;
// lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey;
- if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) {
+ if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == tableGroupId) {
return;
}
- doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, groupIndex);
+ doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, tableGroupId);
// record the current active group id
- pRuntimeEnv->prevGroupId = groupIndex;
+ pRuntimeEnv->prevGroupId = tableGroupId;
}
void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx,
@@ -3640,9 +3723,9 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction
void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current;
- SResultRowInfo *pWindowResInfo = &pTableQueryInfo->resInfo;
+ SResultRowInfo *pResultRowInfo = &pTableQueryInfo->resInfo;
- if (pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL) {
+ if (pResultRowInfo->curPos != -1) {
return;
}
@@ -3661,13 +3744,13 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
TSKEY ek = MAX(win.skey, win.ekey);
getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w);
- if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
- if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
- assert(win.ekey == pQueryAttr->window.ekey);
- }
-
- pWindowResInfo->prevSKey = w.skey;
- }
+// if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) {
+// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
+// assert(win.ekey == pQueryAttr->window.ekey);
+// }
+//
+// pResultRowInfo->prevSKey = w.skey;
+// }
pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
}
@@ -3710,6 +3793,9 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
}
int32_t numOfRowsToCopy = pRow->numOfRows;
+ if (numOfResult + numOfRowsToCopy >= pRuntimeEnv->resultInfo.capacity) {
+ break;
+ }
pGroupResInfo->index += 1;
@@ -3843,6 +3929,106 @@ int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutp
return pOutput->info.rows;
}
+void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType) {
+ SQueryProfEvent event = {0};
+
+ event.eventType = eventType;
+ event.eventTime = taosGetTimestampUs();
+ event.operatorType = operatorInfo->operatorType;
+
+ if (operatorInfo->pRuntimeEnv) {
+ SQInfo* pQInfo = operatorInfo->pRuntimeEnv->qinfo;
+ if (pQInfo->summary.queryProfEvents) {
+ taosArrayPush(pQInfo->summary.queryProfEvents, &event);
+ }
+ }
+}
+
+void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) {
+ SQueryProfEvent event;
+ event.eventType = QUERY_PROF_QUERY_ABORT;
+ event.eventTime = taosGetTimestampUs();
+ event.abortCode = code;
+
+ if (pQInfo->summary.queryProfEvents) {
+ taosArrayPush(pQInfo->summary.queryProfEvents, &event);
+ }
+}
+
+typedef struct {
+ uint8_t operatorType;
+ int64_t beginTime;
+ int64_t endTime;
+ int64_t selfTime;
+ int64_t descendantsTime;
+} SOperatorStackItem;
+
+static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* event, SArray* opStack, SHashObj* profResults) {
+ item->endTime = event->eventTime;
+ item->selfTime = (item->endTime - item->beginTime) - (item->descendantsTime);
+
+ for (int32_t j = 0; j < taosArrayGetSize(opStack); ++j) {
+ SOperatorStackItem* ancestor = taosArrayGet(opStack, j);
+ ancestor->descendantsTime += item->selfTime;
+ }
+
+ uint8_t operatorType = item->operatorType;
+ SOperatorProfResult* result = taosHashGet(profResults, &operatorType, sizeof(operatorType));
+ if (result != NULL) {
+ result->sumRunTimes++;
+ result->sumSelfTime += item->selfTime;
+ } else {
+ SOperatorProfResult opResult;
+ opResult.operatorType = operatorType;
+ opResult.sumSelfTime = item->selfTime;
+ opResult.sumRunTimes = 1;
+ taosHashPut(profResults, &(operatorType), sizeof(operatorType),
+ &opResult, sizeof(opResult));
+ }
+}
+
+void calculateOperatorProfResults(SQInfo* pQInfo) {
+ if (pQInfo->summary.queryProfEvents == NULL) {
+ qDebug("QInfo:0x%"PRIx64" query prof events array is null", pQInfo->qId);
+ return;
+ }
+
+ if (pQInfo->summary.operatorProfResults == NULL) {
+ qDebug("QInfo:0x%"PRIx64" operator prof results hash is null", pQInfo->qId);
+ return;
+ }
+
+ SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem));
+ if (opStack == NULL) {
+ return;
+ }
+
+ size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents);
+ SHashObj* profResults = pQInfo->summary.operatorProfResults;
+
+ for (int i = 0; i < size; ++i) {
+ SQueryProfEvent* event = taosArrayGet(pQInfo->summary.queryProfEvents, i);
+ if (event->eventType == QUERY_PROF_BEFORE_OPERATOR_EXEC) {
+ SOperatorStackItem opItem;
+ opItem.operatorType = event->operatorType;
+ opItem.beginTime = event->eventTime;
+ opItem.descendantsTime = 0;
+ taosArrayPush(opStack, &opItem);
+ } else if (event->eventType == QUERY_PROF_AFTER_OPERATOR_EXEC) {
+ SOperatorStackItem* item = taosArrayPop(opStack);
+ assert(item->operatorType == event->operatorType);
+ doOperatorExecProfOnce(item, event, opStack, profResults);
+ } else if (event->eventType == QUERY_PROF_QUERY_ABORT) {
+ SOperatorStackItem* item;
+ while ((item = taosArrayPop(opStack)) != NULL) {
+ doOperatorExecProfOnce(item, event, opStack, profResults);
+ }
+ }
+ }
+
+ taosArrayDestroy(opStack);
+}
+
void queryCostStatis(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryCostInfo *pSummary = &pQInfo->summary;
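
To make the bookkeeping in calculateOperatorProfResults above concrete with a hypothetical trace: if operator A records its BEFORE event at t = 0 us, its upstream operator B runs from t = 2 us to t = 5 us, and A records its AFTER event at t = 10 us, then B's self time is 3 us, that 3 us is added to A's descendantsTime while A is still on the stack, and A's self time comes out as (10 - 0) - 3 = 7 us; the operatorProfResults hash then accumulates the per-operator self times and run counts that queryCostStatis later prints.
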
@@ -3863,6 +4049,8 @@ void queryCostStatis(SQInfo *pQInfo) {
pSummary->numOfTimeWindows = 0;
}
+ calculateOperatorProfResults(pQInfo);
+
qDebug("QInfo:0x%"PRIx64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, "
"load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
@@ -3870,6 +4058,15 @@ void queryCostStatis(SQInfo *pQInfo) {
qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0,
pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0);
+
+ if (pSummary->operatorProfResults) {
+ SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL);
+ while (opRes != NULL) {
+ qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64,
+ pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime);
+ opRes = taosHashIterate(pSummary->operatorProfResults, opRes);
+ }
+ }
}
//static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {
@@ -4203,8 +4400,8 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
return pFillCol;
}
-int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr, int32_t tbScanner,
- SArray* pOperator, void* param) {
+int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr, int32_t tbScanner, SArray* pOperator,
+ void* param) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
@@ -4271,6 +4468,15 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
// create runtime environment
int32_t numOfTables = (int32_t)pQueryAttr->tableGroupInfo.numOfTables;
pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo));
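+ // Per-query profiling containers; a failed allocation is only logged, and calculateOperatorProfResults() bails out when either is NULL.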
+ pQInfo->summary.queryProfEvents = taosArrayInit(512, sizeof(SQueryProfEvent));
+ if (pQInfo->summary.queryProfEvents == NULL) {
+ qDebug("QInfo:0x%"PRIx64" failed to allocate query prof events array", pQInfo->qId);
+ }
+ pQInfo->summary.operatorProfResults =
+ taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK);
+ if (pQInfo->summary.operatorProfResults == NULL) {
+ qDebug("QInfo:0x%"PRIx64" failed to allocate operator prof results hash", pQInfo->qId);
+ }
code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param);
if (code != TSDB_CODE_SUCCESS) {
@@ -4444,8 +4650,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) {
}
if (pResultRowInfo->size > 0) {
- pResultRowInfo->curIndex = 0;
- pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey;
+ pResultRowInfo->curPos = 0;
}
qDebug("QInfo:0x%"PRIx64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
@@ -4470,8 +4675,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) {
pTableScanInfo->order = cond.order;
if (pResultRowInfo->size > 0) {
- pResultRowInfo->curIndex = pResultRowInfo->size-1;
- pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->size-1]->win.skey;
+ pResultRowInfo->curPos = pResultRowInfo->size - 1;
}
p = doTableScanImpl(pOperator, newgroup);
@@ -4630,8 +4834,8 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
pTableScanInfo->pResultRowInfo = &pInfo->resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pInfo->rowCellInfoOffset;
- } else if (pDownstream->operatorType == OP_Arithmetic) {
- SArithOperatorInfo *pInfo = pDownstream->info;
+ } else if (pDownstream->operatorType == OP_Project) {
+ SProjectOperatorInfo *pInfo = pDownstream->info;
pTableScanInfo->pCtx = pInfo->binfo.pCtx;
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
@@ -4895,7 +5099,10 @@ static SSDataBlock* doAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
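+ // Bracket the upstream exec() call with profiling events so its elapsed time can be attributed to the right operator afterwards.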
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -4950,7 +5157,10 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -4986,23 +5196,23 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
return pInfo->pRes;
}
-static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
+static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
- SArithOperatorInfo* pArithInfo = pOperator->info;
+ SProjectOperatorInfo* pProjectInfo = pOperator->info;
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
- SOptrBasicInfo *pInfo = &pArithInfo->binfo;
+ SOptrBasicInfo *pInfo = &pProjectInfo->binfo;
SSDataBlock* pRes = pInfo->pRes;
int32_t order = pRuntimeEnv->pQueryAttr->order.order;
pRes->info.rows = 0;
- if (pArithInfo->existDataBlock) { // TODO refactor
+ if (pProjectInfo->existDataBlock) { // TODO refactor
STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
- SSDataBlock* pBlock = pArithInfo->existDataBlock;
- pArithInfo->existDataBlock = NULL;
+ SSDataBlock* pBlock = pProjectInfo->existDataBlock;
+ pProjectInfo->existDataBlock = NULL;
*newgroup = true;
// todo dynamic set tags
@@ -5012,9 +5222,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
- updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
+ updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows);
- arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+ projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
}
@@ -5030,7 +5240,10 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
bool prevVal = *newgroup;
// The upstream exec may change the value of the newgroup, so use a local variable instead.
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
assert(*newgroup == false);
@@ -5042,7 +5255,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
// Return result of the previous group in the firstly.
if (*newgroup) {
if (pRes->info.rows > 0) {
- pArithInfo->existDataBlock = pBlock;
+ pProjectInfo->existDataBlock = pBlock;
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return pInfo->pRes;
} else { // init output buffer for a new group data
@@ -5062,9 +5275,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
- updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
+ updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows);
- arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+ projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
}
@@ -5090,7 +5303,10 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
SSDataBlock* pBlock = NULL;
while (1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -5140,7 +5356,10 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
while (1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5185,7 +5404,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5238,7 +5460,10 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5310,7 +5535,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
} else {
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
- int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
+ int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@@ -5327,10 +5552,11 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
}
}
+
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
- int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
+ int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@@ -5366,7 +5592,10 @@ static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
STimeWindow win = pQueryAttr->window;
SOperatorInfo* upstream = pOperator->upstream[0];
while (1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5424,7 +5653,9 @@ static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -5475,7 +5706,9 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -5501,8 +5734,11 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
}
initGroupResInfo(&pRuntimeEnv->groupResInfo, &pInfo->binfo.resultRowInfo);
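+ // For non-super-table queries, sort the grouped results by the ORDER BY column list before converting them into the output block.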
+ if (!pRuntimeEnv->pQueryAttr->stableQuery) {
+ sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes);
+ }
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes);
-
+
if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
@@ -5541,7 +5777,10 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
}
while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (*newgroup) {
assert(pBlock != NULL);
}
@@ -5701,8 +5940,8 @@ static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) {
tfree(pInfo->prevData);
}
-static void destroyArithOperatorInfo(void* param, int32_t numOfOutput) {
- SArithOperatorInfo* pInfo = (SArithOperatorInfo*) param;
+static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
+ SProjectOperatorInfo* pInfo = (SProjectOperatorInfo*) param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
}
@@ -5748,8 +5987,8 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO
return pOperator;
}
-SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
- SArithOperatorInfo* pInfo = calloc(1, sizeof(SArithOperatorInfo));
+SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+ SProjectOperatorInfo* pInfo = calloc(1, sizeof(SProjectOperatorInfo));
pInfo->seed = rand();
pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity;
@@ -5762,8 +6001,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
- pOperator->name = "ArithmeticOperator";
- pOperator->operatorType = OP_Arithmetic;
+ pOperator->name = "ProjectOperator";
+ pOperator->operatorType = OP_Project;
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->info = pInfo;
@@ -5771,8 +6010,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
pOperator->numOfOutput = numOfOutput;
pOperator->pRuntimeEnv = pRuntimeEnv;
- pOperator->exec = doArithmeticOperation;
- pOperator->cleanup = destroyArithOperatorInfo;
+ pOperator->exec = doProjectOperation;
+ pOperator->cleanup = destroyProjectOperatorInfo;
appendUpstream(pOperator, upstream);
return pOperator;
@@ -5946,7 +6185,14 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo));
pInfo->colIndex = -1; // group by column index
+
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
+
+ SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
+
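+ // Scale the result row size by the multi-output row factor (GET_ROW_PARAM_FOR_MULTIOUTPUT), e.g. for top/bottom queries.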
+ pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize *
+ (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
+
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
@@ -6088,8 +6334,8 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0);
- while(pInfo->currentIndex < pInfo->totalTables && count < maxNumOfTables) {
- int32_t i = pInfo->currentIndex++;
+ while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
+ int32_t i = pInfo->curPos++;
STableQueryInfo *item = taosArrayGetP(pa, i);
char *output = pColInfo->pData + count * rsize;
@@ -6133,8 +6379,8 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo
count = 0;
- while(pInfo->currentIndex < pInfo->totalTables && count < maxNumOfTables) {
- int32_t i = pInfo->currentIndex++;
+ while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
+ int32_t i = pInfo->curPos++;
STableQueryInfo* item = taosArrayGetP(pa, i);
@@ -6163,7 +6409,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
count += 1;
}
- if (pInfo->currentIndex >= pInfo->totalTables) {
+ if (pInfo->curPos >= pInfo->totalTables) {
pOperator->status = OP_EXEC_DONE;
}
@@ -6182,7 +6428,7 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
assert(numOfGroup == 0 || numOfGroup == 1);
pInfo->totalTables = pRuntimeEnv->tableqinfoGroupInfo.numOfTables;
- pInfo->currentIndex = 0;
+ pInfo->curPos = 0;
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "SeqTableTagScan";
@@ -6211,7 +6457,10 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -7134,6 +7383,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId) {
doCreateFilterInfo(pQueryAttr->tableCols, pQueryAttr->numOfCols, pQueryAttr->numOfFilterCols,
&pQueryAttr->pFilterInfo, qId);
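+ // Record that a filter info was created so createExecOperatorPlan() knows to add an OP_Filter operator.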
+ pQueryAttr->createFilterOperator = true;
+
return TSDB_CODE_SUCCESS;
}
@@ -7438,6 +7689,7 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
pRuntimeEnv->prevResult = prevResult;
}
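+ // Seed the runtime offset counter from the OFFSET value of the query's LIMIT clause.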
+ pRuntimeEnv->currentOffset = pQueryAttr->limit.offset;
if (tsdb != NULL) {
pQueryAttr->precision = tsdbGetCfg(tsdb)->precision;
}
@@ -7569,6 +7821,9 @@ void freeQInfo(SQInfo *pQInfo) {
tfree(pQInfo->pBuf);
tfree(pQInfo->sql);
+ taosArrayDestroy(pQInfo->summary.queryProfEvents);
+ taosHashCleanup(pQInfo->summary.operatorProfResults);
+
taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows);
pQInfo->signature = 0;
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 627f7eb13ff891835d099f9ca915ac219f9e708d..8c55b7fd94805bc5b584178ce6c1490b619a459e 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -1262,7 +1262,7 @@ int32_t filterProcessGroupsSameColumn(SFilterInfo *info, uint16_t id1, uint16_t
cidx2 = cra->idx;
} else {
u1 = FILTER_GROUP_UNIT(info, g, 0);
- int32_t type = FILTER_UNIT_DATA_TYPE(u1);
+ type = FILTER_UNIT_DATA_TYPE(u1);
if (FILTER_NO_MERGE_DATA_TYPE(type)) {
continue;
}
@@ -1547,8 +1547,8 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx* gctx, SFilterGroupCtx*
if (ctx->colRange && taosArrayGetSize(ctx->colRange) > 0) {
int32_t size = (int32_t)taosArrayGetSize(ctx->colRange);
- for (int32_t i = 0; i < size; ++i) {
- SFilterColRange *cra = taosArrayGet(ctx->colRange, i);
+ for (int32_t m = 0; m < size; ++m) {
+ SFilterColRange *cra = taosArrayGet(ctx->colRange, m);
filterAddGroupUnitFromRange(info, &oinfo, cra, &ng, TSDB_RELATION_AND, NULL);
}
}
@@ -1799,10 +1799,10 @@ int32_t filterGetTimeRange(SFilterInfo *info, STimeWindow *win) {
ERR_JRET(TSDB_CODE_QRY_INVALID_TIME_CONDITION);
}
- SFilterRange ra;
- filterGetMergeRangeRes(prev, &ra);
- win->skey = ra.s;
- win->ekey = ra.e;
+ SFilterRange tra;
+ filterGetMergeRangeRes(prev, &tra);
+ win->skey = tra.s;
+ win->ekey = tra.e;
}
}
@@ -1815,4 +1815,45 @@ _err_return:
}
+int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar) {
+#if 0
+ for (int32_t i = 0; i < numOfFilterCols; ++i) {
+ if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+ pFilterInfo[i].pData2 = pFilterInfo[i].pData;
+ pFilterInfo[i].pData = malloc(rows * pFilterInfo[i].info.bytes);
+ int32_t bufSize = pFilterInfo[i].info.bytes - VARSTR_HEADER_SIZE;
+ for (int32_t j = 0; j < rows; ++j) {
+ char* dst = (char *)pFilterInfo[i].pData + j * pFilterInfo[i].info.bytes;
+ char* src = (char *)pFilterInfo[i].pData2 + j * pFilterInfo[i].info.bytes;
+ int32_t len = 0;
+ taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
+ varDataLen(dst) = len;
+ }
+ *gotNchar = true;
+ }
+ }
+#endif
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo) {
+#if 0
+ for (int32_t i = 0; i < numOfFilterCols; ++i) {
+ if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+ if (pFilterInfo[i].pData2) {
+ tfree(pFilterInfo[i].pData);
+ pFilterInfo[i].pData = pFilterInfo[i].pData2;
+ pFilterInfo[i].pData2 = NULL;
+ }
+ }
+ }
+#endif
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+
+
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index ee587a515dca39559bc6d061501d4e3397c0781a..e724b0418c5fe5e9a34459e09cf37c535d3236f2 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -127,7 +127,8 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
SColumn* pCol = taosArrayGetP(tableCols, i);
SColumnIndex index = {.tableIndex = 0, .columnIndex = pCol->columnIndex};
- SExprInfo* p = tscExprCreate(pQueryInfo, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes,
+ STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes,
pCol->info.colId, 0, TSDB_COL_NORMAL);
strncpy(p->base.aliasName, pSchema[pCol->columnIndex].name, tListLen(p->base.aliasName));
@@ -565,7 +566,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
@@ -585,7 +586,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->sw.gap > 0) {
@@ -593,7 +594,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->stateWindow) {
@@ -601,7 +602,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->simpleAgg) {
@@ -619,15 +620,15 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else { // diff/add/multiply/subtract/division
- if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->vgId == 0) { // todo refactor
+ if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->createFilterOperator && pQueryAttr->vgId == 0) { // todo refactor
op = OP_Filter;
taosArrayPush(plan, &op);
} else {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
}
@@ -665,7 +666,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
}
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index 7052e8e38b869c8ad1c5ebb1d702b30c8c22a6f8..919ecdade8e71bcc09137858acdddc616b6b43a8 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -124,7 +124,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
tSqlExpr *pSqlExpr = calloc(1, sizeof(tSqlExpr));
if (pToken != NULL) {
- pSqlExpr->token = *pToken;
+ pSqlExpr->exprToken = *pToken;
}
if (optrType == TK_NULL) {
@@ -161,7 +161,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
// Here it must be the column name (tk_id) if it is not a number or string.
assert(optrType == TK_ID || optrType == TK_ALL);
if (pToken != NULL) {
- pSqlExpr->colInfo = *pToken;
+ pSqlExpr->columnName = *pToken;
}
pSqlExpr->tokenId = optrType;
@@ -180,17 +180,17 @@ tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToke
return NULL;
}
- tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
- pExpr->tokenId = optType;
- pExpr->type = SQL_NODE_SQLFUNCTION;
- pExpr->pParam = pParam;
+ tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
+ pExpr->tokenId = optType;
+ pExpr->type = SQL_NODE_SQLFUNCTION;
+ pExpr->Expr.paramList = pParam;
int32_t len = (int32_t)((endToken->z + endToken->n) - pFuncToken->z);
- pExpr->operand = (*pFuncToken);
+ pExpr->Expr.operand = (*pFuncToken);
- pExpr->token.n = len;
- pExpr->token.z = pFuncToken->z;
- pExpr->token.type = pFuncToken->type;
+ pExpr->exprToken.n = len;
+ pExpr->exprToken.z = pFuncToken->z;
+ pExpr->exprToken.type = pFuncToken->type;
return pExpr;
}
@@ -204,16 +204,16 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
pExpr->type = SQL_NODE_EXPR;
if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) {
- char* endPos = pRight->token.z + pRight->token.n;
- pExpr->token.z = pLeft->token.z;
- pExpr->token.n = (uint32_t)(endPos - pExpr->token.z);
- pExpr->token.type = pLeft->token.type;
+ char* endPos = pRight->exprToken.z + pRight->exprToken.n;
+ pExpr->exprToken.z = pLeft->exprToken.z;
+ pExpr->exprToken.n = (uint32_t)(endPos - pExpr->exprToken.z);
+ pExpr->exprToken.type = pLeft->exprToken.type;
}
if ((pLeft != NULL && pRight != NULL) &&
(optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) {
/*
- * if a token is noted as the TK_TIMESTAMP, the time precision is microsecond
+ * if an exprToken is noted as TK_TIMESTAMP, the time precision is microsecond
* Otherwise, the time precision is adaptive, determined by the time precision from databases.
*/
if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) ||
@@ -304,7 +304,7 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
tSqlExpr *pRSub = calloc(1, sizeof(tSqlExpr));
pRSub->tokenId = TK_SET; // TODO refactor .....
- pRSub->pParam = (SArray *)pRight;
+ pRSub->Expr.paramList = (SArray *)pRight;
pExpr->pRight = pRSub;
} else {
@@ -346,8 +346,8 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
|| (left->pLeft == NULL && right->pLeft)
|| (left->pRight && right->pRight == NULL)
|| (left->pRight == NULL && right->pRight)
- || (left->pParam && right->pParam == NULL)
- || (left->pParam == NULL && right->pParam)) {
+ || (left->Expr.paramList && right->Expr.paramList == NULL)
+ || (left->Expr.paramList == NULL && right->Expr.paramList)) {
return 1;
}
@@ -355,20 +355,20 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
return 1;
}
- if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
+ if (tStrTokenCompare(&left->columnName, &right->columnName)) {
return 1;
}
- if (right->pParam && left->pParam) {
- size_t size = taosArrayGetSize(right->pParam);
- if (left->pParam && taosArrayGetSize(left->pParam) != size) {
+ if (right->Expr.paramList && left->Expr.paramList) {
+ size_t size = taosArrayGetSize(right->Expr.paramList);
+ if (left->Expr.paramList && taosArrayGetSize(left->Expr.paramList) != size) {
return 1;
}
for (int32_t i = 0; i < size; i++) {
- tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
+ tSqlExprItem* pLeftElem = taosArrayGet(left->Expr.paramList, i);
tSqlExpr* pSubLeft = pLeftElem->pNode;
- tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
+ tSqlExprItem* pRightElem = taosArrayGet(right->Expr.paramList, i);
tSqlExpr* pSubRight = pRightElem->pNode;
if (tSqlExprCompare(pSubLeft, pSubRight)) {
@@ -404,8 +404,8 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
memset(&pExpr->value, 0, sizeof(pExpr->value));
tVariantAssign(&pExpr->value, &pSrc->value);
- //we don't clone pParam now because clone is only used for between/and
- assert(pSrc->pParam == NULL);
+ // We don't clone paramList now because clone is only used for between/and.
+ assert(pSrc->Expr.paramList == NULL);
return pExpr;
}
@@ -463,7 +463,7 @@ static void doDestroySqlExprNode(tSqlExpr *pExpr) {
tVariantDestroy(&pExpr->value);
}
- tSqlExprListDestroy(pExpr->pParam);
+ tSqlExprListDestroy(pExpr->Expr.paramList);
free(pExpr);
}
@@ -941,7 +941,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
taosArrayDestroy(pInfo->pMiscInfo->a);
}
- if (pInfo->pMiscInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) {
+ if (pInfo->pMiscInfo != NULL && (pInfo->type == TSDB_SQL_CREATE_DB || pInfo->type == TSDB_SQL_ALTER_DB)) {
taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant);
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 7b08450d3b9461c5ccfc316485f12c5d4ea84111..04a7079128ac035542611f06559409a81bc43cf1 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -44,8 +44,7 @@ int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) {
int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t type) {
pResultRowInfo->type = type;
pResultRowInfo->size = 0;
- pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
- pResultRowInfo->curIndex = -1;
+ pResultRowInfo->curPos = -1;
pResultRowInfo->capacity = size;
pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES);
@@ -90,10 +89,9 @@ void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRo
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid);
taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex)));
}
-
- pResultRowInfo->curIndex = -1;
- pResultRowInfo->size = 0;
- pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
+
+ pResultRowInfo->size = 0;
+ pResultRowInfo->curPos = -1;
}
int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) {
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index 6726cf7055595258d633919e2d0d69f6782441df..dd20716388436c0d9487e693bde3ba5fc9b142b1 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -240,6 +240,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
// error occurs, record the error code and return to client
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
if (ret != TSDB_CODE_SUCCESS) {
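+ // Publish an abort event so operators that were mid-execution still get their time accounted in the profile.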
+ publishQueryAbortEvent(pQInfo, ret);
pQInfo->code = ret;
qDebug("QInfo:0x%"PRIx64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code));
return doBuildResCheck(pQInfo);
@@ -248,7 +249,9 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
qDebug("QInfo:0x%"PRIx64" query task is launched", pQInfo->qId);
bool newgroup = false;
+ publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
+ publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC);
pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv);
if (isQueryKilled(pQInfo)) {
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index d3f478ebeb5fefe8bba2df859028dc8b97e66527..7d5748e29fd3d17783b8bbc07ae7a43cd49efc2a 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -129,16 +129,16 @@ typedef union {
#define ParseARG_STORE yypParser->pInfo = pInfo
#define YYFALLBACK 1
#define YYNSTATE 347
-#define YYNRULE 285
+#define YYNRULE 283
#define YYNTOKEN 190
#define YY_MAX_SHIFT 346
-#define YY_MIN_SHIFTREDUCE 549
-#define YY_MAX_SHIFTREDUCE 833
-#define YY_ERROR_ACTION 834
-#define YY_ACCEPT_ACTION 835
-#define YY_NO_ACTION 836
-#define YY_MIN_REDUCE 837
-#define YY_MAX_REDUCE 1121
+#define YY_MIN_SHIFTREDUCE 547
+#define YY_MAX_SHIFTREDUCE 829
+#define YY_ERROR_ACTION 830
+#define YY_ACCEPT_ACTION 831
+#define YY_NO_ACTION 832
+#define YY_MIN_REDUCE 833
+#define YY_MAX_REDUCE 1115
/************* End control #defines *******************************************/
/* Define the yytestcase() macro to be a no-op if is not already defined
@@ -204,82 +204,82 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (735)
+#define YY_ACTTAB_COUNT (731)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 23, 598, 1010, 598, 219, 344, 194, 835, 346, 599,
- /* 10 */ 598, 599, 197, 54, 55, 225, 58, 59, 599, 988,
- /* 20 */ 239, 48, 1097, 57, 300, 62, 60, 63, 61, 1001,
- /* 30 */ 1001, 231, 233, 53, 52, 988, 988, 51, 50, 49,
- /* 40 */ 54, 55, 35, 58, 59, 222, 223, 239, 48, 598,
- /* 50 */ 57, 300, 62, 60, 63, 61, 1001, 599, 152, 236,
- /* 60 */ 53, 52, 235, 152, 51, 50, 49, 55, 1007, 58,
- /* 70 */ 59, 630, 261, 239, 48, 240, 57, 300, 62, 60,
- /* 80 */ 63, 61, 29, 83, 982, 221, 53, 52, 145, 985,
- /* 90 */ 51, 50, 49, 550, 551, 552, 553, 554, 555, 556,
- /* 100 */ 557, 558, 559, 560, 561, 562, 345, 80, 772, 220,
+ /* 0 */ 23, 596, 1004, 596, 219, 344, 194, 831, 346, 597,
+ /* 10 */ 596, 597, 197, 54, 55, 225, 58, 59, 597, 982,
+ /* 20 */ 239, 48, 1091, 57, 302, 62, 60, 63, 61, 995,
+ /* 30 */ 995, 231, 233, 53, 52, 982, 982, 51, 50, 49,
+ /* 40 */ 54, 55, 35, 58, 59, 222, 223, 239, 48, 596,
+ /* 50 */ 57, 302, 62, 60, 63, 61, 995, 597, 152, 236,
+ /* 60 */ 53, 52, 235, 152, 51, 50, 49, 55, 1001, 58,
+ /* 70 */ 59, 770, 261, 239, 48, 240, 57, 302, 62, 60,
+ /* 80 */ 63, 61, 29, 83, 976, 221, 53, 52, 145, 979,
+ /* 90 */ 51, 50, 49, 548, 549, 550, 551, 552, 553, 554,
+ /* 100 */ 555, 556, 557, 558, 559, 560, 345, 771, 768, 220,
/* 110 */ 95, 77, 54, 55, 35, 58, 59, 42, 197, 239,
- /* 120 */ 48, 197, 57, 300, 62, 60, 63, 61, 1098, 232,
- /* 130 */ 1046, 1098, 53, 52, 197, 89, 51, 50, 49, 54,
- /* 140 */ 56, 264, 58, 59, 1098, 976, 239, 48, 974, 57,
- /* 150 */ 300, 62, 60, 63, 61, 268, 267, 229, 298, 53,
- /* 160 */ 52, 985, 248, 51, 50, 49, 41, 296, 339, 338,
- /* 170 */ 295, 294, 293, 337, 292, 336, 335, 334, 291, 333,
- /* 180 */ 332, 948, 936, 937, 938, 939, 940, 941, 942, 943,
- /* 190 */ 944, 945, 946, 947, 949, 950, 58, 59, 24, 986,
- /* 200 */ 239, 48, 253, 57, 300, 62, 60, 63, 61, 35,
- /* 210 */ 195, 257, 256, 53, 52, 205, 330, 51, 50, 49,
- /* 220 */ 53, 52, 206, 14, 51, 50, 49, 129, 128, 204,
- /* 230 */ 298, 238, 787, 305, 83, 776, 81, 779, 116, 782,
- /* 240 */ 200, 238, 787, 883, 35, 776, 330, 779, 179, 782,
- /* 250 */ 114, 108, 119, 94, 91, 1094, 984, 118, 124, 127,
- /* 260 */ 117, 987, 35, 217, 218, 152, 121, 301, 42, 41,
- /* 270 */ 264, 339, 338, 217, 218, 242, 337, 1093, 336, 335,
- /* 280 */ 334, 704, 333, 332, 701, 1117, 702, 230, 703, 1092,
- /* 290 */ 260, 985, 75, 956, 680, 954, 955, 340, 917, 213,
- /* 300 */ 957, 247, 959, 960, 958, 309, 961, 962, 152, 985,
- /* 310 */ 64, 35, 244, 245, 1, 167, 62, 60, 63, 61,
- /* 320 */ 64, 893, 320, 319, 53, 52, 179, 1109, 51, 50,
- /* 330 */ 49, 5, 38, 169, 92, 282, 215, 88, 168, 102,
- /* 340 */ 97, 101, 788, 783, 720, 76, 243, 35, 241, 784,
- /* 350 */ 308, 307, 788, 783, 310, 188, 186, 184, 985, 784,
- /* 360 */ 35, 35, 183, 132, 131, 130, 971, 972, 34, 975,
- /* 370 */ 35, 68, 249, 35, 246, 35, 315, 314, 973, 778,
- /* 380 */ 1047, 781, 280, 884, 51, 50, 49, 777, 179, 780,
- /* 390 */ 311, 705, 706, 71, 985, 343, 342, 137, 143, 141,
- /* 400 */ 140, 90, 717, 312, 316, 3, 180, 985, 985, 753,
- /* 410 */ 754, 774, 33, 317, 69, 78, 318, 985, 322, 302,
- /* 420 */ 985, 262, 985, 736, 724, 744, 745, 690, 216, 9,
- /* 430 */ 285, 36, 692, 147, 72, 65, 26, 36, 287, 691,
- /* 440 */ 36, 288, 65, 808, 789, 237, 597, 775, 93, 65,
- /* 450 */ 16, 74, 15, 25, 25, 107, 25, 106, 18, 709,
- /* 460 */ 17, 710, 707, 198, 708, 6, 20, 113, 19, 112,
- /* 470 */ 199, 22, 201, 21, 126, 125, 196, 202, 203, 679,
- /* 480 */ 208, 209, 210, 207, 193, 1057, 1056, 227, 1053, 1052,
- /* 490 */ 228, 321, 258, 785, 144, 1009, 45, 1020, 1017, 1018,
- /* 500 */ 1002, 786, 265, 1022, 1039, 163, 146, 150, 274, 1038,
- /* 510 */ 983, 164, 142, 981, 165, 166, 791, 896, 290, 43,
- /* 520 */ 735, 999, 191, 39, 279, 154, 269, 299, 892, 306,
- /* 530 */ 224, 1116, 104, 1115, 271, 1112, 278, 170, 73, 70,
- /* 540 */ 153, 47, 313, 283, 1108, 281, 110, 1107, 1104, 171,
- /* 550 */ 914, 155, 277, 40, 37, 44, 275, 156, 192, 273,
- /* 560 */ 880, 120, 878, 122, 123, 158, 876, 875, 270, 250,
- /* 570 */ 182, 873, 872, 871, 870, 869, 46, 868, 185, 331,
- /* 580 */ 187, 865, 863, 861, 859, 189, 856, 190, 115, 263,
- /* 590 */ 79, 84, 272, 323, 1040, 324, 325, 326, 327, 328,
- /* 600 */ 329, 341, 833, 252, 214, 832, 251, 254, 234, 289,
- /* 610 */ 255, 831, 814, 813, 259, 211, 212, 264, 98, 10,
- /* 620 */ 99, 82, 284, 712, 266, 85, 30, 874, 737, 148,
- /* 630 */ 867, 174, 133, 173, 915, 172, 175, 176, 178, 177,
- /* 640 */ 134, 135, 916, 136, 866, 952, 858, 4, 857, 740,
- /* 650 */ 162, 159, 157, 149, 86, 160, 964, 2, 161, 742,
- /* 660 */ 87, 226, 276, 31, 746, 151, 11, 32, 13, 12,
- /* 670 */ 27, 28, 286, 643, 94, 96, 639, 637, 636, 635,
- /* 680 */ 632, 297, 7, 100, 602, 790, 303, 792, 8, 304,
- /* 690 */ 103, 682, 66, 105, 36, 67, 109, 111, 681, 678,
- /* 700 */ 624, 622, 614, 620, 616, 618, 612, 610, 646, 645,
- /* 710 */ 644, 642, 641, 640, 638, 634, 633, 181, 600, 566,
- /* 720 */ 564, 837, 836, 836, 836, 836, 836, 836, 836, 836,
- /* 730 */ 836, 836, 836, 138, 139,
+ /* 120 */ 48, 197, 57, 302, 62, 60, 63, 61, 1092, 232,
+ /* 130 */ 1040, 1092, 53, 52, 197, 89, 51, 50, 49, 54,
+ /* 140 */ 56, 968, 58, 59, 1092, 970, 239, 48, 262, 57,
+ /* 150 */ 302, 62, 60, 63, 61, 268, 267, 229, 36, 53,
+ /* 160 */ 52, 979, 248, 51, 50, 49, 41, 298, 339, 338,
+ /* 170 */ 297, 296, 295, 337, 294, 293, 292, 336, 291, 335,
+ /* 180 */ 334, 944, 932, 933, 934, 935, 936, 937, 938, 939,
+ /* 190 */ 940, 941, 942, 943, 945, 946, 58, 59, 24, 980,
+ /* 200 */ 239, 48, 90, 57, 302, 62, 60, 63, 61, 51,
+ /* 210 */ 50, 49, 152, 53, 52, 205, 78, 51, 50, 49,
+ /* 220 */ 53, 52, 206, 300, 51, 50, 49, 129, 128, 204,
+ /* 230 */ 732, 238, 783, 307, 83, 772, 740, 775, 35, 778,
+ /* 240 */ 147, 238, 783, 116, 253, 772, 65, 775, 35, 778,
+ /* 250 */ 300, 332, 152, 257, 256, 35, 879, 62, 60, 63,
+ /* 260 */ 61, 179, 332, 217, 218, 53, 52, 303, 42, 51,
+ /* 270 */ 50, 49, 700, 217, 218, 697, 304, 698, 14, 699,
+ /* 280 */ 41, 230, 339, 338, 1041, 979, 280, 337, 340, 913,
+ /* 290 */ 260, 336, 75, 335, 334, 978, 1, 167, 311, 213,
+ /* 300 */ 628, 242, 979, 244, 245, 114, 108, 119, 94, 91,
+ /* 310 */ 64, 195, 118, 124, 127, 117, 952, 80, 950, 951,
+ /* 320 */ 64, 121, 282, 953, 88, 76, 247, 954, 35, 955,
+ /* 330 */ 956, 5, 38, 169, 92, 3, 180, 35, 168, 102,
+ /* 340 */ 97, 101, 784, 779, 35, 35, 35, 35, 716, 780,
+ /* 350 */ 676, 264, 784, 779, 188, 186, 184, 200, 967, 780,
+ /* 360 */ 35, 183, 132, 131, 130, 1088, 965, 966, 34, 969,
+ /* 370 */ 1087, 312, 243, 787, 241, 979, 310, 309, 322, 321,
+ /* 380 */ 313, 889, 701, 702, 979, 81, 179, 314, 318, 319,
+ /* 390 */ 320, 979, 979, 979, 979, 880, 774, 249, 777, 246,
+ /* 400 */ 179, 317, 316, 324, 773, 713, 776, 979, 343, 342,
+ /* 410 */ 137, 741, 143, 141, 140, 749, 750, 68, 71, 264,
+ /* 420 */ 686, 26, 237, 285, 16, 688, 15, 287, 720, 687,
+ /* 430 */ 36, 981, 9, 36, 33, 65, 804, 93, 785, 65,
+ /* 440 */ 595, 74, 6, 107, 1086, 106, 25, 18, 25, 17,
+ /* 450 */ 25, 705, 703, 706, 704, 20, 113, 19, 112, 72,
+ /* 460 */ 69, 215, 22, 288, 21, 126, 125, 216, 198, 675,
+ /* 470 */ 199, 201, 196, 202, 203, 208, 209, 210, 207, 193,
+ /* 480 */ 1111, 1103, 1051, 1050, 227, 1047, 1046, 228, 323, 45,
+ /* 490 */ 258, 144, 1003, 1014, 1011, 1012, 1016, 996, 142, 265,
+ /* 500 */ 146, 1033, 150, 274, 1032, 977, 163, 164, 269, 224,
+ /* 510 */ 781, 975, 165, 166, 892, 290, 731, 43, 782, 191,
+ /* 520 */ 283, 993, 39, 301, 154, 888, 308, 1110, 104, 1109,
+ /* 530 */ 1106, 170, 315, 73, 1102, 110, 271, 278, 70, 153,
+ /* 540 */ 47, 1101, 155, 1098, 281, 171, 910, 40, 279, 277,
+ /* 550 */ 37, 275, 44, 273, 192, 270, 156, 876, 120, 874,
+ /* 560 */ 122, 123, 872, 871, 250, 182, 869, 868, 867, 866,
+ /* 570 */ 865, 864, 185, 187, 861, 859, 857, 855, 189, 852,
+ /* 580 */ 190, 333, 263, 79, 46, 84, 115, 272, 1034, 325,
+ /* 590 */ 326, 327, 328, 329, 330, 331, 214, 341, 829, 234,
+ /* 600 */ 251, 252, 289, 828, 254, 255, 827, 211, 212, 809,
+ /* 610 */ 98, 810, 99, 259, 264, 284, 10, 82, 708, 266,
+ /* 620 */ 85, 30, 870, 174, 178, 863, 911, 172, 173, 175,
+ /* 630 */ 176, 4, 133, 177, 862, 912, 134, 135, 948, 136,
+ /* 640 */ 854, 733, 148, 853, 157, 158, 159, 160, 736, 161,
+ /* 650 */ 149, 162, 958, 2, 86, 226, 738, 87, 276, 31,
+ /* 660 */ 742, 151, 32, 13, 11, 27, 286, 28, 12, 641,
+ /* 670 */ 96, 94, 639, 638, 637, 635, 634, 633, 630, 299,
+ /* 680 */ 600, 100, 7, 305, 786, 788, 8, 306, 103, 105,
+ /* 690 */ 66, 67, 109, 111, 678, 36, 677, 674, 622, 620,
+ /* 700 */ 612, 618, 614, 616, 610, 608, 644, 643, 642, 640,
+ /* 710 */ 636, 632, 631, 181, 598, 564, 562, 833, 832, 832,
+ /* 720 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 138,
+ /* 730 */ 139,
};
static const YYCODETYPE yy_lookahead[] = {
/* 0 */ 259, 1, 194, 1, 193, 194, 259, 191, 192, 9,
@@ -289,73 +289,73 @@ static const YYCODETYPE yy_lookahead[] = {
/* 40 */ 13, 14, 194, 16, 17, 256, 256, 20, 21, 1,
/* 50 */ 23, 24, 25, 26, 27, 28, 240, 9, 194, 200,
/* 60 */ 33, 34, 200, 194, 37, 38, 39, 14, 260, 16,
- /* 70 */ 17, 5, 256, 20, 21, 200, 23, 24, 25, 26,
+ /* 70 */ 17, 1, 256, 20, 21, 200, 23, 24, 25, 26,
/* 80 */ 27, 28, 80, 80, 194, 237, 33, 34, 194, 241,
/* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51,
- /* 100 */ 52, 53, 54, 55, 56, 57, 58, 81, 81, 61,
+ /* 100 */ 52, 53, 54, 55, 56, 57, 58, 37, 81, 61,
/* 110 */ 201, 111, 13, 14, 194, 16, 17, 114, 259, 20,
/* 120 */ 21, 259, 23, 24, 25, 26, 27, 28, 269, 239,
/* 130 */ 266, 269, 33, 34, 259, 266, 37, 38, 39, 13,
- /* 140 */ 14, 115, 16, 17, 269, 236, 20, 21, 0, 23,
- /* 150 */ 24, 25, 26, 27, 28, 261, 262, 237, 82, 33,
+ /* 140 */ 14, 0, 16, 17, 269, 236, 20, 21, 81, 23,
+ /* 150 */ 24, 25, 26, 27, 28, 261, 262, 237, 91, 33,
/* 160 */ 34, 241, 194, 37, 38, 39, 92, 93, 94, 95,
/* 170 */ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
/* 180 */ 106, 215, 216, 217, 218, 219, 220, 221, 222, 223,
/* 190 */ 224, 225, 226, 227, 228, 229, 16, 17, 44, 231,
- /* 200 */ 20, 21, 137, 23, 24, 25, 26, 27, 28, 194,
- /* 210 */ 259, 146, 147, 33, 34, 61, 84, 37, 38, 39,
- /* 220 */ 33, 34, 68, 80, 37, 38, 39, 73, 74, 75,
- /* 230 */ 82, 1, 2, 79, 80, 5, 81, 7, 76, 9,
- /* 240 */ 259, 1, 2, 199, 194, 5, 84, 7, 204, 9,
- /* 250 */ 62, 63, 64, 110, 111, 259, 241, 69, 70, 71,
- /* 260 */ 72, 242, 194, 33, 34, 194, 78, 37, 114, 92,
- /* 270 */ 115, 94, 95, 33, 34, 68, 99, 259, 101, 102,
- /* 280 */ 103, 2, 105, 106, 5, 242, 7, 237, 9, 259,
- /* 290 */ 136, 241, 138, 215, 5, 217, 218, 213, 214, 145,
- /* 300 */ 222, 68, 224, 225, 226, 237, 228, 229, 194, 241,
- /* 310 */ 80, 194, 33, 34, 202, 203, 25, 26, 27, 28,
- /* 320 */ 80, 199, 33, 34, 33, 34, 204, 242, 37, 38,
- /* 330 */ 39, 62, 63, 64, 201, 264, 259, 266, 69, 70,
- /* 340 */ 71, 72, 112, 113, 37, 201, 139, 194, 141, 119,
- /* 350 */ 143, 144, 112, 113, 237, 62, 63, 64, 241, 119,
- /* 360 */ 194, 194, 69, 70, 71, 72, 233, 234, 235, 236,
- /* 370 */ 194, 91, 139, 194, 141, 194, 143, 144, 234, 5,
- /* 380 */ 266, 7, 268, 199, 37, 38, 39, 5, 204, 7,
- /* 390 */ 237, 112, 113, 91, 241, 65, 66, 67, 62, 63,
- /* 400 */ 64, 243, 91, 237, 237, 197, 198, 241, 241, 127,
- /* 410 */ 128, 1, 80, 237, 134, 257, 237, 241, 237, 15,
- /* 420 */ 241, 81, 241, 81, 117, 81, 81, 81, 259, 118,
- /* 430 */ 81, 91, 81, 91, 132, 91, 91, 91, 81, 81,
- /* 440 */ 91, 109, 91, 81, 81, 60, 81, 37, 91, 91,
- /* 450 */ 140, 80, 142, 91, 91, 140, 91, 142, 140, 5,
- /* 460 */ 142, 7, 5, 259, 7, 80, 140, 140, 142, 142,
- /* 470 */ 259, 140, 259, 142, 76, 77, 259, 259, 259, 108,
- /* 480 */ 259, 259, 259, 259, 259, 232, 232, 232, 232, 232,
- /* 490 */ 232, 232, 194, 119, 194, 194, 258, 194, 194, 194,
- /* 500 */ 240, 119, 240, 194, 267, 244, 194, 194, 194, 267,
- /* 510 */ 240, 194, 60, 194, 194, 194, 112, 194, 194, 194,
- /* 520 */ 119, 255, 194, 194, 124, 253, 263, 194, 194, 194,
- /* 530 */ 263, 194, 194, 194, 263, 194, 263, 194, 131, 133,
- /* 540 */ 254, 130, 194, 125, 194, 129, 194, 194, 194, 194,
- /* 550 */ 194, 252, 123, 194, 194, 194, 122, 251, 194, 121,
- /* 560 */ 194, 194, 194, 194, 194, 249, 194, 194, 120, 194,
- /* 570 */ 194, 194, 194, 194, 194, 194, 135, 194, 194, 107,
- /* 580 */ 194, 194, 194, 194, 194, 194, 194, 194, 90, 195,
- /* 590 */ 195, 195, 195, 89, 195, 50, 86, 88, 54, 87,
- /* 600 */ 85, 82, 5, 5, 195, 5, 148, 148, 195, 195,
- /* 610 */ 5, 5, 94, 93, 137, 195, 195, 115, 201, 80,
- /* 620 */ 201, 116, 109, 81, 91, 91, 80, 195, 81, 80,
- /* 630 */ 195, 206, 196, 210, 212, 211, 209, 207, 205, 208,
- /* 640 */ 196, 196, 214, 196, 195, 230, 195, 197, 195, 81,
- /* 650 */ 245, 248, 250, 91, 80, 247, 230, 202, 246, 81,
- /* 660 */ 80, 1, 80, 91, 81, 80, 126, 91, 80, 126,
- /* 670 */ 80, 80, 109, 9, 110, 76, 5, 5, 5, 5,
- /* 680 */ 5, 15, 80, 76, 83, 81, 24, 112, 80, 58,
- /* 690 */ 142, 5, 16, 142, 91, 16, 142, 142, 5, 81,
+ /* 200 */ 20, 21, 243, 23, 24, 25, 26, 27, 28, 37,
+ /* 210 */ 38, 39, 194, 33, 34, 61, 257, 37, 38, 39,
+ /* 220 */ 33, 34, 68, 82, 37, 38, 39, 73, 74, 75,
+ /* 230 */ 81, 1, 2, 79, 80, 5, 81, 7, 194, 9,
+ /* 240 */ 91, 1, 2, 76, 137, 5, 91, 7, 194, 9,
+ /* 250 */ 82, 84, 194, 146, 147, 194, 199, 25, 26, 27,
+ /* 260 */ 28, 204, 84, 33, 34, 33, 34, 37, 114, 37,
+ /* 270 */ 38, 39, 2, 33, 34, 5, 15, 7, 80, 9,
+ /* 280 */ 92, 237, 94, 95, 266, 241, 268, 99, 213, 214,
+ /* 290 */ 136, 103, 138, 105, 106, 241, 202, 203, 237, 145,
+ /* 300 */ 5, 68, 241, 33, 34, 62, 63, 64, 110, 111,
+ /* 310 */ 80, 259, 69, 70, 71, 72, 215, 81, 217, 218,
+ /* 320 */ 80, 78, 264, 222, 266, 201, 68, 226, 194, 228,
+ /* 330 */ 229, 62, 63, 64, 201, 197, 198, 194, 69, 70,
+ /* 340 */ 71, 72, 112, 113, 194, 194, 194, 194, 37, 119,
+ /* 350 */ 5, 115, 112, 113, 62, 63, 64, 259, 234, 119,
+ /* 360 */ 194, 69, 70, 71, 72, 259, 233, 234, 235, 236,
+ /* 370 */ 259, 237, 139, 112, 141, 241, 143, 144, 33, 34,
+ /* 380 */ 237, 199, 112, 113, 241, 81, 204, 237, 237, 237,
+ /* 390 */ 237, 241, 241, 241, 241, 199, 5, 139, 7, 141,
+ /* 400 */ 204, 143, 144, 237, 5, 91, 7, 241, 65, 66,
+ /* 410 */ 67, 81, 62, 63, 64, 127, 128, 91, 91, 115,
+ /* 420 */ 81, 91, 60, 81, 140, 81, 142, 81, 117, 81,
+ /* 430 */ 91, 242, 118, 91, 80, 91, 81, 91, 81, 91,
+ /* 440 */ 81, 80, 80, 140, 259, 142, 91, 140, 91, 142,
+ /* 450 */ 91, 5, 5, 7, 7, 140, 140, 142, 142, 132,
+ /* 460 */ 134, 259, 140, 109, 142, 76, 77, 259, 259, 108,
+ /* 470 */ 259, 259, 259, 259, 259, 259, 259, 259, 259, 259,
+ /* 480 */ 242, 242, 232, 232, 232, 232, 232, 232, 232, 258,
+ /* 490 */ 194, 194, 194, 194, 194, 194, 194, 240, 60, 240,
+ /* 500 */ 194, 267, 194, 194, 267, 240, 244, 194, 263, 263,
+ /* 510 */ 119, 194, 194, 194, 194, 194, 119, 194, 119, 194,
+ /* 520 */ 125, 255, 194, 194, 253, 194, 194, 194, 194, 194,
+ /* 530 */ 194, 194, 194, 131, 194, 194, 263, 263, 133, 254,
+ /* 540 */ 130, 194, 252, 194, 129, 194, 194, 194, 124, 123,
+ /* 550 */ 194, 122, 194, 121, 194, 120, 251, 194, 194, 194,
+ /* 560 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194,
+ /* 570 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194,
+ /* 580 */ 194, 107, 195, 195, 135, 195, 90, 195, 195, 89,
+ /* 590 */ 50, 86, 88, 54, 87, 85, 195, 82, 5, 195,
+ /* 600 */ 148, 5, 195, 5, 148, 5, 5, 195, 195, 93,
+ /* 610 */ 201, 94, 201, 137, 115, 109, 80, 116, 81, 91,
+ /* 620 */ 91, 80, 195, 206, 205, 195, 212, 211, 210, 209,
+ /* 630 */ 207, 197, 196, 208, 195, 214, 196, 196, 230, 196,
+ /* 640 */ 195, 81, 80, 195, 250, 249, 248, 247, 81, 246,
+ /* 650 */ 91, 245, 230, 202, 80, 1, 81, 80, 80, 91,
+ /* 660 */ 81, 80, 91, 80, 126, 80, 109, 80, 126, 9,
+ /* 670 */ 76, 110, 5, 5, 5, 5, 5, 5, 5, 15,
+ /* 680 */ 83, 76, 80, 24, 81, 112, 80, 58, 142, 142,
+ /* 690 */ 16, 16, 142, 142, 5, 91, 5, 81, 5, 5,
/* 700 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- /* 710 */ 5, 5, 5, 5, 5, 5, 5, 91, 83, 60,
- /* 720 */ 59, 0, 270, 270, 270, 270, 270, 270, 270, 270,
- /* 730 */ 270, 270, 270, 21, 21, 270, 270, 270, 270, 270,
+ /* 710 */ 5, 5, 5, 91, 83, 60, 59, 0, 270, 270,
+ /* 720 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 21,
+ /* 730 */ 21, 270, 270, 270, 270, 270, 270, 270, 270, 270,
/* 740 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
/* 750 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
/* 760 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
@@ -374,109 +374,109 @@ static const YYCODETYPE yy_lookahead[] = {
/* 890 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
/* 900 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
/* 910 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270,
- /* 920 */ 270, 270, 270, 270, 270,
+ /* 920 */ 270,
};
#define YY_SHIFT_COUNT (346)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (721)
+#define YY_SHIFT_MAX (717)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 154, 74, 74, 177, 177, 76, 230, 240, 240, 2,
+ /* 0 */ 154, 74, 74, 188, 188, 168, 230, 240, 240, 2,
/* 10 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- /* 20 */ 9, 9, 9, 0, 48, 240, 279, 279, 279, 3,
- /* 30 */ 3, 9, 9, 9, 148, 9, 9, 162, 76, 132,
- /* 40 */ 132, 66, 735, 735, 735, 240, 240, 240, 240, 240,
+ /* 20 */ 9, 9, 9, 0, 48, 240, 270, 270, 270, 3,
+ /* 30 */ 3, 9, 9, 9, 141, 9, 9, 167, 168, 178,
+ /* 40 */ 178, 295, 731, 731, 731, 240, 240, 240, 240, 240,
/* 50 */ 240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
- /* 60 */ 240, 240, 240, 240, 240, 279, 279, 279, 289, 289,
- /* 70 */ 289, 289, 289, 289, 289, 9, 9, 9, 307, 9,
- /* 80 */ 9, 9, 3, 3, 9, 9, 9, 9, 282, 282,
- /* 90 */ 311, 3, 9, 9, 9, 9, 9, 9, 9, 9,
+ /* 60 */ 240, 240, 240, 240, 240, 270, 270, 270, 345, 345,
+ /* 70 */ 345, 345, 345, 345, 345, 9, 9, 9, 311, 9,
+ /* 80 */ 9, 9, 3, 3, 9, 9, 9, 9, 288, 288,
+ /* 90 */ 314, 3, 9, 9, 9, 9, 9, 9, 9, 9,
/* 100 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
/* 110 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
/* 120 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
/* 130 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- /* 140 */ 9, 9, 9, 9, 452, 452, 452, 401, 401, 401,
- /* 150 */ 452, 401, 452, 407, 406, 411, 418, 416, 400, 429,
- /* 160 */ 434, 438, 448, 441, 452, 452, 452, 472, 76, 76,
- /* 170 */ 452, 452, 498, 504, 545, 510, 509, 544, 512, 515,
- /* 180 */ 472, 66, 452, 519, 519, 452, 519, 452, 519, 452,
- /* 190 */ 452, 735, 735, 27, 99, 99, 126, 99, 53, 180,
- /* 200 */ 291, 291, 291, 291, 188, 269, 293, 187, 187, 187,
- /* 210 */ 187, 207, 233, 65, 143, 347, 347, 374, 382, 330,
- /* 220 */ 336, 340, 26, 155, 342, 344, 345, 280, 302, 346,
- /* 230 */ 349, 351, 357, 358, 332, 362, 363, 410, 385, 404,
- /* 240 */ 365, 310, 315, 318, 454, 457, 326, 327, 371, 331,
- /* 250 */ 398, 597, 458, 598, 600, 459, 605, 606, 518, 520,
- /* 260 */ 477, 502, 513, 539, 505, 542, 546, 533, 534, 547,
- /* 270 */ 549, 568, 562, 574, 578, 580, 660, 582, 583, 585,
- /* 280 */ 572, 540, 576, 543, 588, 513, 590, 563, 591, 564,
- /* 290 */ 599, 664, 671, 672, 673, 674, 675, 601, 666, 607,
- /* 300 */ 602, 604, 575, 608, 662, 631, 676, 548, 551, 603,
- /* 310 */ 603, 603, 603, 679, 554, 555, 603, 603, 603, 686,
- /* 320 */ 693, 618, 603, 695, 696, 697, 698, 699, 700, 701,
- /* 330 */ 702, 703, 704, 705, 706, 707, 708, 709, 710, 711,
- /* 340 */ 626, 635, 712, 713, 659, 661, 721,
+ /* 140 */ 9, 9, 9, 9, 438, 438, 438, 397, 397, 397,
+ /* 150 */ 438, 397, 438, 402, 405, 395, 410, 415, 424, 426,
+ /* 160 */ 429, 432, 435, 449, 438, 438, 438, 474, 168, 168,
+ /* 170 */ 438, 438, 496, 500, 540, 505, 504, 539, 507, 510,
+ /* 180 */ 474, 295, 438, 515, 515, 438, 515, 438, 515, 438,
+ /* 190 */ 438, 731, 731, 27, 99, 99, 126, 99, 53, 180,
+ /* 200 */ 232, 232, 232, 232, 243, 269, 292, 187, 187, 187,
+ /* 210 */ 187, 233, 258, 107, 198, 172, 172, 391, 399, 343,
+ /* 220 */ 350, 67, 236, 304, 149, 155, 330, 326, 327, 339,
+ /* 230 */ 342, 344, 346, 348, 354, 355, 357, 70, 362, 261,
+ /* 240 */ 359, 284, 303, 307, 446, 447, 315, 316, 361, 322,
+ /* 250 */ 389, 593, 452, 596, 598, 456, 600, 601, 517, 516,
+ /* 260 */ 476, 499, 506, 536, 501, 537, 541, 528, 529, 560,
+ /* 270 */ 562, 567, 559, 574, 575, 577, 654, 578, 579, 581,
+ /* 280 */ 568, 538, 571, 542, 583, 506, 585, 557, 587, 561,
+ /* 290 */ 594, 660, 667, 668, 669, 670, 671, 672, 673, 597,
+ /* 300 */ 664, 605, 602, 603, 573, 606, 659, 629, 674, 546,
+ /* 310 */ 547, 604, 604, 604, 604, 675, 550, 551, 604, 604,
+ /* 320 */ 604, 689, 691, 616, 604, 693, 694, 695, 696, 697,
+ /* 330 */ 698, 699, 700, 701, 702, 703, 704, 705, 706, 707,
+ /* 340 */ 622, 631, 708, 709, 655, 657, 717,
};
#define YY_REDUCE_COUNT (192)
#define YY_REDUCE_MIN (-259)
-#define YY_REDUCE_MAX (455)
+#define YY_REDUCE_MAX (451)
static const short yy_reduce_ofst[] = {
- /* 0 */ -184, -34, -34, 78, 78, 133, -141, -138, -125, -106,
- /* 10 */ -152, 114, 71, -80, 50, 68, 117, 153, 166, 167,
- /* 20 */ 176, 179, 181, -192, -189, -247, -223, -207, -206, -211,
- /* 30 */ -210, -136, -131, -110, -91, -32, 15, 44, 144, 122,
- /* 40 */ 184, 84, 158, 112, 208, -259, -253, -49, -19, -4,
- /* 50 */ 18, 30, 77, 169, 204, 211, 213, 217, 218, 219,
- /* 60 */ 221, 222, 223, 224, 225, 19, 43, 85, 253, 254,
- /* 70 */ 255, 256, 257, 258, 259, 298, 300, 301, 238, 303,
- /* 80 */ 304, 305, 260, 262, 309, 312, 313, 314, 237, 242,
- /* 90 */ 261, 270, 317, 319, 320, 321, 323, 324, 325, 328,
- /* 100 */ 329, 333, 334, 335, 337, 338, 339, 341, 343, 348,
- /* 110 */ 350, 352, 353, 354, 355, 356, 359, 360, 361, 364,
- /* 120 */ 366, 367, 368, 369, 370, 372, 373, 375, 376, 377,
- /* 130 */ 378, 379, 380, 381, 383, 384, 386, 387, 388, 389,
- /* 140 */ 390, 391, 392, 393, 394, 395, 396, 263, 267, 271,
- /* 150 */ 397, 273, 399, 266, 286, 272, 299, 306, 402, 316,
- /* 160 */ 403, 408, 412, 405, 409, 413, 414, 415, 417, 419,
- /* 170 */ 420, 421, 422, 424, 423, 425, 427, 430, 431, 433,
- /* 180 */ 426, 428, 432, 436, 444, 435, 445, 449, 447, 451,
- /* 190 */ 453, 455, 450,
+ /* 0 */ -184, -34, -34, 101, 101, 133, -141, -138, -125, -106,
+ /* 10 */ -152, 18, 58, -80, 44, 61, 134, 143, 150, 151,
+ /* 20 */ 152, 153, 166, -192, -189, -247, -223, -207, -206, -211,
+ /* 30 */ -210, -136, -131, -110, -91, -32, 54, 57, 124, 182,
+ /* 40 */ 196, 75, -41, 94, 138, -259, -253, 52, 98, 106,
+ /* 50 */ 111, 185, 202, 208, 209, 211, 212, 213, 214, 215,
+ /* 60 */ 216, 217, 218, 219, 220, 189, 238, 239, 250, 251,
+ /* 70 */ 252, 253, 254, 255, 256, 296, 297, 298, 231, 299,
+ /* 80 */ 300, 301, 257, 259, 302, 306, 308, 309, 234, 237,
+ /* 90 */ 262, 265, 313, 317, 318, 319, 320, 321, 323, 325,
+ /* 100 */ 328, 329, 331, 332, 333, 334, 335, 336, 337, 338,
+ /* 110 */ 340, 341, 347, 349, 351, 352, 353, 356, 358, 360,
+ /* 120 */ 363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
+ /* 130 */ 373, 374, 375, 376, 377, 378, 379, 380, 381, 382,
+ /* 140 */ 383, 384, 385, 386, 387, 388, 390, 245, 246, 273,
+ /* 150 */ 392, 274, 393, 266, 285, 271, 290, 305, 394, 396,
+ /* 160 */ 398, 400, 403, 406, 401, 404, 407, 408, 409, 411,
+ /* 170 */ 412, 413, 414, 416, 418, 417, 420, 423, 425, 419,
+ /* 180 */ 422, 421, 427, 436, 440, 430, 441, 439, 443, 445,
+ /* 190 */ 448, 451, 434,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 834, 951, 894, 963, 881, 891, 1100, 1100, 1100, 834,
- /* 10 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 20 */ 834, 834, 834, 1011, 853, 1100, 834, 834, 834, 834,
- /* 30 */ 834, 834, 834, 834, 891, 834, 834, 897, 891, 897,
- /* 40 */ 897, 834, 1006, 935, 953, 834, 834, 834, 834, 834,
- /* 50 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 60 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 70 */ 834, 834, 834, 834, 834, 834, 834, 834, 1013, 1019,
- /* 80 */ 1016, 834, 834, 834, 1021, 834, 834, 834, 1043, 1043,
- /* 90 */ 1004, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 100 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 110 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 120 */ 879, 834, 877, 834, 834, 834, 834, 834, 834, 834,
- /* 130 */ 834, 834, 834, 834, 834, 834, 834, 864, 834, 834,
- /* 140 */ 834, 834, 834, 834, 855, 855, 855, 834, 834, 834,
- /* 150 */ 855, 834, 855, 1050, 1054, 1048, 1036, 1044, 1035, 1031,
- /* 160 */ 1029, 1027, 1026, 1058, 855, 855, 855, 895, 891, 891,
- /* 170 */ 855, 855, 913, 911, 909, 901, 907, 903, 905, 899,
- /* 180 */ 882, 834, 855, 889, 889, 855, 889, 855, 889, 855,
- /* 190 */ 855, 935, 953, 834, 1059, 1049, 834, 1099, 1089, 1088,
- /* 200 */ 1095, 1087, 1086, 1085, 834, 834, 834, 1081, 1084, 1083,
- /* 210 */ 1082, 834, 834, 834, 834, 1091, 1090, 834, 834, 834,
- /* 220 */ 834, 834, 834, 834, 834, 834, 834, 1055, 1051, 834,
- /* 230 */ 834, 834, 834, 834, 834, 834, 834, 834, 1061, 834,
- /* 240 */ 834, 834, 834, 834, 834, 834, 834, 834, 965, 834,
- /* 250 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 260 */ 834, 1003, 834, 834, 834, 834, 834, 1015, 1014, 834,
- /* 270 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 280 */ 1045, 834, 1037, 834, 834, 977, 834, 834, 834, 834,
- /* 290 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 300 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 1118,
- /* 310 */ 1113, 1114, 1111, 834, 834, 834, 1110, 1105, 1106, 834,
- /* 320 */ 834, 834, 1103, 834, 834, 834, 834, 834, 834, 834,
- /* 330 */ 834, 834, 834, 834, 834, 834, 834, 834, 834, 834,
- /* 340 */ 919, 834, 862, 860, 834, 851, 834,
+ /* 0 */ 830, 947, 890, 957, 877, 887, 1094, 1094, 1094, 830,
+ /* 10 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 20 */ 830, 830, 830, 1005, 849, 1094, 830, 830, 830, 830,
+ /* 30 */ 830, 830, 830, 830, 887, 830, 830, 893, 887, 893,
+ /* 40 */ 893, 830, 1000, 931, 949, 830, 830, 830, 830, 830,
+ /* 50 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 60 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 70 */ 830, 830, 830, 830, 830, 830, 830, 830, 1007, 1013,
+ /* 80 */ 1010, 830, 830, 830, 1015, 830, 830, 830, 1037, 1037,
+ /* 90 */ 998, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 100 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 110 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 120 */ 875, 830, 873, 830, 830, 830, 830, 830, 830, 830,
+ /* 130 */ 830, 830, 830, 830, 830, 830, 830, 860, 830, 830,
+ /* 140 */ 830, 830, 830, 830, 851, 851, 851, 830, 830, 830,
+ /* 150 */ 851, 830, 851, 1044, 1048, 1030, 1042, 1038, 1029, 1025,
+ /* 160 */ 1023, 1021, 1020, 1052, 851, 851, 851, 891, 887, 887,
+ /* 170 */ 851, 851, 909, 907, 905, 897, 903, 899, 901, 895,
+ /* 180 */ 878, 830, 851, 885, 885, 851, 885, 851, 885, 851,
+ /* 190 */ 851, 931, 949, 830, 1053, 1043, 830, 1093, 1083, 1082,
+ /* 200 */ 1089, 1081, 1080, 1079, 830, 830, 830, 1075, 1078, 1077,
+ /* 210 */ 1076, 830, 830, 830, 830, 1085, 1084, 830, 830, 830,
+ /* 220 */ 830, 830, 830, 830, 830, 830, 830, 1049, 1045, 830,
+ /* 230 */ 830, 830, 830, 830, 830, 830, 830, 830, 1055, 830,
+ /* 240 */ 830, 830, 830, 830, 830, 830, 830, 830, 959, 830,
+ /* 250 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 260 */ 830, 997, 830, 830, 830, 830, 830, 1009, 1008, 830,
+ /* 270 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 280 */ 1039, 830, 1031, 830, 830, 971, 830, 830, 830, 830,
+ /* 290 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 300 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 310 */ 830, 1112, 1107, 1108, 1105, 830, 830, 830, 1104, 1099,
+ /* 320 */ 1100, 830, 830, 830, 1097, 830, 830, 830, 830, 830,
+ /* 330 */ 830, 830, 830, 830, 830, 830, 830, 830, 830, 830,
+ /* 340 */ 915, 830, 858, 856, 830, 847, 830,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -1024,8 +1024,8 @@ static const char *const yyTokenName[] = {
/* 249 */ "fill_opt",
/* 250 */ "sliding_opt",
/* 251 */ "groupby_opt",
- /* 252 */ "orderby_opt",
- /* 253 */ "having_opt",
+ /* 252 */ "having_opt",
+ /* 253 */ "orderby_opt",
/* 254 */ "slimit_opt",
/* 255 */ "limit_opt",
/* 256 */ "union",
@@ -1171,169 +1171,167 @@ static const char *const yyRuleName[] = {
/* 119 */ "alter_db_optr ::= alter_db_optr keep",
/* 120 */ "alter_db_optr ::= alter_db_optr blocks",
/* 121 */ "alter_db_optr ::= alter_db_optr comp",
- /* 122 */ "alter_db_optr ::= alter_db_optr wal",
- /* 123 */ "alter_db_optr ::= alter_db_optr fsync",
- /* 124 */ "alter_db_optr ::= alter_db_optr update",
- /* 125 */ "alter_db_optr ::= alter_db_optr cachelast",
- /* 126 */ "alter_topic_optr ::= alter_db_optr",
- /* 127 */ "alter_topic_optr ::= alter_topic_optr partitions",
- /* 128 */ "typename ::= ids",
- /* 129 */ "typename ::= ids LP signed RP",
- /* 130 */ "typename ::= ids UNSIGNED",
- /* 131 */ "signed ::= INTEGER",
- /* 132 */ "signed ::= PLUS INTEGER",
- /* 133 */ "signed ::= MINUS INTEGER",
- /* 134 */ "cmd ::= CREATE TABLE create_table_args",
- /* 135 */ "cmd ::= CREATE TABLE create_stable_args",
- /* 136 */ "cmd ::= CREATE STABLE create_stable_args",
- /* 137 */ "cmd ::= CREATE TABLE create_table_list",
- /* 138 */ "create_table_list ::= create_from_stable",
- /* 139 */ "create_table_list ::= create_table_list create_from_stable",
- /* 140 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP",
- /* 141 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP",
- /* 142 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP",
- /* 143 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP",
- /* 144 */ "tagNamelist ::= tagNamelist COMMA ids",
- /* 145 */ "tagNamelist ::= ids",
- /* 146 */ "create_table_args ::= ifnotexists ids cpxName AS select",
- /* 147 */ "columnlist ::= columnlist COMMA column",
- /* 148 */ "columnlist ::= column",
- /* 149 */ "column ::= ids typename",
- /* 150 */ "tagitemlist ::= tagitemlist COMMA tagitem",
- /* 151 */ "tagitemlist ::= tagitem",
- /* 152 */ "tagitem ::= INTEGER",
- /* 153 */ "tagitem ::= FLOAT",
- /* 154 */ "tagitem ::= STRING",
- /* 155 */ "tagitem ::= BOOL",
- /* 156 */ "tagitem ::= NULL",
- /* 157 */ "tagitem ::= NOW",
- /* 158 */ "tagitem ::= MINUS INTEGER",
- /* 159 */ "tagitem ::= MINUS FLOAT",
- /* 160 */ "tagitem ::= PLUS INTEGER",
- /* 161 */ "tagitem ::= PLUS FLOAT",
- /* 162 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt",
- /* 163 */ "select ::= LP select RP",
- /* 164 */ "union ::= select",
- /* 165 */ "union ::= union UNION ALL select",
- /* 166 */ "cmd ::= union",
- /* 167 */ "select ::= SELECT selcollist",
- /* 168 */ "sclp ::= selcollist COMMA",
- /* 169 */ "sclp ::=",
- /* 170 */ "selcollist ::= sclp distinct expr as",
- /* 171 */ "selcollist ::= sclp STAR",
- /* 172 */ "as ::= AS ids",
- /* 173 */ "as ::= ids",
- /* 174 */ "as ::=",
- /* 175 */ "distinct ::= DISTINCT",
- /* 176 */ "distinct ::=",
- /* 177 */ "from ::= FROM tablelist",
- /* 178 */ "from ::= FROM sub",
- /* 179 */ "sub ::= LP union RP",
- /* 180 */ "sub ::= LP union RP ids",
- /* 181 */ "sub ::= sub COMMA LP union RP ids",
- /* 182 */ "tablelist ::= ids cpxName",
- /* 183 */ "tablelist ::= ids cpxName ids",
- /* 184 */ "tablelist ::= tablelist COMMA ids cpxName",
- /* 185 */ "tablelist ::= tablelist COMMA ids cpxName ids",
- /* 186 */ "tmvar ::= VARIABLE",
- /* 187 */ "interval_opt ::= INTERVAL LP tmvar RP",
- /* 188 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP",
- /* 189 */ "interval_opt ::=",
- /* 190 */ "session_option ::=",
- /* 191 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP",
- /* 192 */ "windowstate_option ::=",
- /* 193 */ "windowstate_option ::= STATE_WINDOW LP ids RP",
- /* 194 */ "fill_opt ::=",
- /* 195 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
- /* 196 */ "fill_opt ::= FILL LP ID RP",
- /* 197 */ "sliding_opt ::= SLIDING LP tmvar RP",
- /* 198 */ "sliding_opt ::=",
- /* 199 */ "orderby_opt ::=",
- /* 200 */ "orderby_opt ::= ORDER BY sortlist",
- /* 201 */ "sortlist ::= sortlist COMMA item sortorder",
- /* 202 */ "sortlist ::= item sortorder",
- /* 203 */ "item ::= ids cpxName",
- /* 204 */ "sortorder ::= ASC",
- /* 205 */ "sortorder ::= DESC",
- /* 206 */ "sortorder ::=",
- /* 207 */ "groupby_opt ::=",
- /* 208 */ "groupby_opt ::= GROUP BY grouplist",
- /* 209 */ "grouplist ::= grouplist COMMA item",
- /* 210 */ "grouplist ::= item",
- /* 211 */ "having_opt ::=",
- /* 212 */ "having_opt ::= HAVING expr",
- /* 213 */ "limit_opt ::=",
- /* 214 */ "limit_opt ::= LIMIT signed",
- /* 215 */ "limit_opt ::= LIMIT signed OFFSET signed",
- /* 216 */ "limit_opt ::= LIMIT signed COMMA signed",
- /* 217 */ "slimit_opt ::=",
- /* 218 */ "slimit_opt ::= SLIMIT signed",
- /* 219 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
- /* 220 */ "slimit_opt ::= SLIMIT signed COMMA signed",
- /* 221 */ "where_opt ::=",
- /* 222 */ "where_opt ::= WHERE expr",
- /* 223 */ "expr ::= LP expr RP",
- /* 224 */ "expr ::= ID",
- /* 225 */ "expr ::= ID DOT ID",
- /* 226 */ "expr ::= ID DOT STAR",
- /* 227 */ "expr ::= INTEGER",
- /* 228 */ "expr ::= MINUS INTEGER",
- /* 229 */ "expr ::= PLUS INTEGER",
- /* 230 */ "expr ::= FLOAT",
- /* 231 */ "expr ::= MINUS FLOAT",
- /* 232 */ "expr ::= PLUS FLOAT",
- /* 233 */ "expr ::= STRING",
- /* 234 */ "expr ::= NOW",
- /* 235 */ "expr ::= VARIABLE",
- /* 236 */ "expr ::= PLUS VARIABLE",
- /* 237 */ "expr ::= MINUS VARIABLE",
- /* 238 */ "expr ::= BOOL",
- /* 239 */ "expr ::= NULL",
- /* 240 */ "expr ::= ID LP exprlist RP",
- /* 241 */ "expr ::= ID LP STAR RP",
- /* 242 */ "expr ::= expr IS NULL",
- /* 243 */ "expr ::= expr IS NOT NULL",
- /* 244 */ "expr ::= expr LT expr",
- /* 245 */ "expr ::= expr GT expr",
- /* 246 */ "expr ::= expr LE expr",
- /* 247 */ "expr ::= expr GE expr",
- /* 248 */ "expr ::= expr NE expr",
- /* 249 */ "expr ::= expr EQ expr",
- /* 250 */ "expr ::= expr BETWEEN expr AND expr",
- /* 251 */ "expr ::= expr AND expr",
- /* 252 */ "expr ::= expr OR expr",
- /* 253 */ "expr ::= expr PLUS expr",
- /* 254 */ "expr ::= expr MINUS expr",
- /* 255 */ "expr ::= expr STAR expr",
- /* 256 */ "expr ::= expr SLASH expr",
- /* 257 */ "expr ::= expr REM expr",
- /* 258 */ "expr ::= expr LIKE expr",
- /* 259 */ "expr ::= expr IN LP exprlist RP",
- /* 260 */ "exprlist ::= exprlist COMMA expritem",
- /* 261 */ "exprlist ::= expritem",
- /* 262 */ "expritem ::= expr",
- /* 263 */ "expritem ::=",
- /* 264 */ "cmd ::= RESET QUERY CACHE",
- /* 265 */ "cmd ::= SYNCDB ids REPLICA",
- /* 266 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
- /* 267 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
- /* 268 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist",
- /* 269 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
- /* 270 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
- /* 271 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
- /* 272 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
- /* 273 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist",
- /* 274 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
- /* 275 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
- /* 276 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist",
- /* 277 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
- /* 278 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
- /* 279 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
- /* 280 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem",
- /* 281 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist",
- /* 282 */ "cmd ::= KILL CONNECTION INTEGER",
- /* 283 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
- /* 284 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
+ /* 122 */ "alter_db_optr ::= alter_db_optr update",
+ /* 123 */ "alter_db_optr ::= alter_db_optr cachelast",
+ /* 124 */ "alter_topic_optr ::= alter_db_optr",
+ /* 125 */ "alter_topic_optr ::= alter_topic_optr partitions",
+ /* 126 */ "typename ::= ids",
+ /* 127 */ "typename ::= ids LP signed RP",
+ /* 128 */ "typename ::= ids UNSIGNED",
+ /* 129 */ "signed ::= INTEGER",
+ /* 130 */ "signed ::= PLUS INTEGER",
+ /* 131 */ "signed ::= MINUS INTEGER",
+ /* 132 */ "cmd ::= CREATE TABLE create_table_args",
+ /* 133 */ "cmd ::= CREATE TABLE create_stable_args",
+ /* 134 */ "cmd ::= CREATE STABLE create_stable_args",
+ /* 135 */ "cmd ::= CREATE TABLE create_table_list",
+ /* 136 */ "create_table_list ::= create_from_stable",
+ /* 137 */ "create_table_list ::= create_table_list create_from_stable",
+ /* 138 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP",
+ /* 139 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP",
+ /* 140 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP",
+ /* 141 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP",
+ /* 142 */ "tagNamelist ::= tagNamelist COMMA ids",
+ /* 143 */ "tagNamelist ::= ids",
+ /* 144 */ "create_table_args ::= ifnotexists ids cpxName AS select",
+ /* 145 */ "columnlist ::= columnlist COMMA column",
+ /* 146 */ "columnlist ::= column",
+ /* 147 */ "column ::= ids typename",
+ /* 148 */ "tagitemlist ::= tagitemlist COMMA tagitem",
+ /* 149 */ "tagitemlist ::= tagitem",
+ /* 150 */ "tagitem ::= INTEGER",
+ /* 151 */ "tagitem ::= FLOAT",
+ /* 152 */ "tagitem ::= STRING",
+ /* 153 */ "tagitem ::= BOOL",
+ /* 154 */ "tagitem ::= NULL",
+ /* 155 */ "tagitem ::= NOW",
+ /* 156 */ "tagitem ::= MINUS INTEGER",
+ /* 157 */ "tagitem ::= MINUS FLOAT",
+ /* 158 */ "tagitem ::= PLUS INTEGER",
+ /* 159 */ "tagitem ::= PLUS FLOAT",
+ /* 160 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt",
+ /* 161 */ "select ::= LP select RP",
+ /* 162 */ "union ::= select",
+ /* 163 */ "union ::= union UNION ALL select",
+ /* 164 */ "cmd ::= union",
+ /* 165 */ "select ::= SELECT selcollist",
+ /* 166 */ "sclp ::= selcollist COMMA",
+ /* 167 */ "sclp ::=",
+ /* 168 */ "selcollist ::= sclp distinct expr as",
+ /* 169 */ "selcollist ::= sclp STAR",
+ /* 170 */ "as ::= AS ids",
+ /* 171 */ "as ::= ids",
+ /* 172 */ "as ::=",
+ /* 173 */ "distinct ::= DISTINCT",
+ /* 174 */ "distinct ::=",
+ /* 175 */ "from ::= FROM tablelist",
+ /* 176 */ "from ::= FROM sub",
+ /* 177 */ "sub ::= LP union RP",
+ /* 178 */ "sub ::= LP union RP ids",
+ /* 179 */ "sub ::= sub COMMA LP union RP ids",
+ /* 180 */ "tablelist ::= ids cpxName",
+ /* 181 */ "tablelist ::= ids cpxName ids",
+ /* 182 */ "tablelist ::= tablelist COMMA ids cpxName",
+ /* 183 */ "tablelist ::= tablelist COMMA ids cpxName ids",
+ /* 184 */ "tmvar ::= VARIABLE",
+ /* 185 */ "interval_opt ::= INTERVAL LP tmvar RP",
+ /* 186 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP",
+ /* 187 */ "interval_opt ::=",
+ /* 188 */ "session_option ::=",
+ /* 189 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP",
+ /* 190 */ "windowstate_option ::=",
+ /* 191 */ "windowstate_option ::= STATE_WINDOW LP ids RP",
+ /* 192 */ "fill_opt ::=",
+ /* 193 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
+ /* 194 */ "fill_opt ::= FILL LP ID RP",
+ /* 195 */ "sliding_opt ::= SLIDING LP tmvar RP",
+ /* 196 */ "sliding_opt ::=",
+ /* 197 */ "orderby_opt ::=",
+ /* 198 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 199 */ "sortlist ::= sortlist COMMA item sortorder",
+ /* 200 */ "sortlist ::= item sortorder",
+ /* 201 */ "item ::= ids cpxName",
+ /* 202 */ "sortorder ::= ASC",
+ /* 203 */ "sortorder ::= DESC",
+ /* 204 */ "sortorder ::=",
+ /* 205 */ "groupby_opt ::=",
+ /* 206 */ "groupby_opt ::= GROUP BY grouplist",
+ /* 207 */ "grouplist ::= grouplist COMMA item",
+ /* 208 */ "grouplist ::= item",
+ /* 209 */ "having_opt ::=",
+ /* 210 */ "having_opt ::= HAVING expr",
+ /* 211 */ "limit_opt ::=",
+ /* 212 */ "limit_opt ::= LIMIT signed",
+ /* 213 */ "limit_opt ::= LIMIT signed OFFSET signed",
+ /* 214 */ "limit_opt ::= LIMIT signed COMMA signed",
+ /* 215 */ "slimit_opt ::=",
+ /* 216 */ "slimit_opt ::= SLIMIT signed",
+ /* 217 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
+ /* 218 */ "slimit_opt ::= SLIMIT signed COMMA signed",
+ /* 219 */ "where_opt ::=",
+ /* 220 */ "where_opt ::= WHERE expr",
+ /* 221 */ "expr ::= LP expr RP",
+ /* 222 */ "expr ::= ID",
+ /* 223 */ "expr ::= ID DOT ID",
+ /* 224 */ "expr ::= ID DOT STAR",
+ /* 225 */ "expr ::= INTEGER",
+ /* 226 */ "expr ::= MINUS INTEGER",
+ /* 227 */ "expr ::= PLUS INTEGER",
+ /* 228 */ "expr ::= FLOAT",
+ /* 229 */ "expr ::= MINUS FLOAT",
+ /* 230 */ "expr ::= PLUS FLOAT",
+ /* 231 */ "expr ::= STRING",
+ /* 232 */ "expr ::= NOW",
+ /* 233 */ "expr ::= VARIABLE",
+ /* 234 */ "expr ::= PLUS VARIABLE",
+ /* 235 */ "expr ::= MINUS VARIABLE",
+ /* 236 */ "expr ::= BOOL",
+ /* 237 */ "expr ::= NULL",
+ /* 238 */ "expr ::= ID LP exprlist RP",
+ /* 239 */ "expr ::= ID LP STAR RP",
+ /* 240 */ "expr ::= expr IS NULL",
+ /* 241 */ "expr ::= expr IS NOT NULL",
+ /* 242 */ "expr ::= expr LT expr",
+ /* 243 */ "expr ::= expr GT expr",
+ /* 244 */ "expr ::= expr LE expr",
+ /* 245 */ "expr ::= expr GE expr",
+ /* 246 */ "expr ::= expr NE expr",
+ /* 247 */ "expr ::= expr EQ expr",
+ /* 248 */ "expr ::= expr BETWEEN expr AND expr",
+ /* 249 */ "expr ::= expr AND expr",
+ /* 250 */ "expr ::= expr OR expr",
+ /* 251 */ "expr ::= expr PLUS expr",
+ /* 252 */ "expr ::= expr MINUS expr",
+ /* 253 */ "expr ::= expr STAR expr",
+ /* 254 */ "expr ::= expr SLASH expr",
+ /* 255 */ "expr ::= expr REM expr",
+ /* 256 */ "expr ::= expr LIKE expr",
+ /* 257 */ "expr ::= expr IN LP exprlist RP",
+ /* 258 */ "exprlist ::= exprlist COMMA expritem",
+ /* 259 */ "exprlist ::= expritem",
+ /* 260 */ "expritem ::= expr",
+ /* 261 */ "expritem ::=",
+ /* 262 */ "cmd ::= RESET QUERY CACHE",
+ /* 263 */ "cmd ::= SYNCDB ids REPLICA",
+ /* 264 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
+ /* 265 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
+ /* 266 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist",
+ /* 267 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
+ /* 268 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
+ /* 269 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
+ /* 270 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 271 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist",
+ /* 272 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
+ /* 273 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
+ /* 274 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist",
+ /* 275 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
+ /* 276 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
+ /* 277 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
+ /* 278 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 279 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist",
+ /* 280 */ "cmd ::= KILL CONNECTION INTEGER",
+ /* 281 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
+ /* 282 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
};
#endif /* NDEBUG */
@@ -1468,7 +1466,7 @@ tSqlExprListDestroy((yypminor->yy441));
case 239: /* tagNamelist */
case 249: /* fill_opt */
case 251: /* groupby_opt */
- case 252: /* orderby_opt */
+ case 253: /* orderby_opt */
case 264: /* sortlist */
case 268: /* grouplist */
{
@@ -1493,7 +1491,7 @@ destroyRelationInfo((yypminor->yy244));
}
break;
case 245: /* where_opt */
- case 253: /* having_opt */
+ case 252: /* having_opt */
case 259: /* expr */
case 269: /* expritem */
{
@@ -1923,169 +1921,167 @@ static const struct {
{ 197, -2 }, /* (119) alter_db_optr ::= alter_db_optr keep */
{ 197, -2 }, /* (120) alter_db_optr ::= alter_db_optr blocks */
{ 197, -2 }, /* (121) alter_db_optr ::= alter_db_optr comp */
- { 197, -2 }, /* (122) alter_db_optr ::= alter_db_optr wal */
- { 197, -2 }, /* (123) alter_db_optr ::= alter_db_optr fsync */
- { 197, -2 }, /* (124) alter_db_optr ::= alter_db_optr update */
- { 197, -2 }, /* (125) alter_db_optr ::= alter_db_optr cachelast */
- { 198, -1 }, /* (126) alter_topic_optr ::= alter_db_optr */
- { 198, -2 }, /* (127) alter_topic_optr ::= alter_topic_optr partitions */
- { 231, -1 }, /* (128) typename ::= ids */
- { 231, -4 }, /* (129) typename ::= ids LP signed RP */
- { 231, -2 }, /* (130) typename ::= ids UNSIGNED */
- { 232, -1 }, /* (131) signed ::= INTEGER */
- { 232, -2 }, /* (132) signed ::= PLUS INTEGER */
- { 232, -2 }, /* (133) signed ::= MINUS INTEGER */
- { 192, -3 }, /* (134) cmd ::= CREATE TABLE create_table_args */
- { 192, -3 }, /* (135) cmd ::= CREATE TABLE create_stable_args */
- { 192, -3 }, /* (136) cmd ::= CREATE STABLE create_stable_args */
- { 192, -3 }, /* (137) cmd ::= CREATE TABLE create_table_list */
- { 235, -1 }, /* (138) create_table_list ::= create_from_stable */
- { 235, -2 }, /* (139) create_table_list ::= create_table_list create_from_stable */
- { 233, -6 }, /* (140) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
- { 234, -10 }, /* (141) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
- { 236, -10 }, /* (142) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
- { 236, -13 }, /* (143) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
- { 239, -3 }, /* (144) tagNamelist ::= tagNamelist COMMA ids */
- { 239, -1 }, /* (145) tagNamelist ::= ids */
- { 233, -5 }, /* (146) create_table_args ::= ifnotexists ids cpxName AS select */
- { 237, -3 }, /* (147) columnlist ::= columnlist COMMA column */
- { 237, -1 }, /* (148) columnlist ::= column */
- { 241, -2 }, /* (149) column ::= ids typename */
- { 238, -3 }, /* (150) tagitemlist ::= tagitemlist COMMA tagitem */
- { 238, -1 }, /* (151) tagitemlist ::= tagitem */
- { 242, -1 }, /* (152) tagitem ::= INTEGER */
- { 242, -1 }, /* (153) tagitem ::= FLOAT */
- { 242, -1 }, /* (154) tagitem ::= STRING */
- { 242, -1 }, /* (155) tagitem ::= BOOL */
- { 242, -1 }, /* (156) tagitem ::= NULL */
- { 242, -1 }, /* (157) tagitem ::= NOW */
- { 242, -2 }, /* (158) tagitem ::= MINUS INTEGER */
- { 242, -2 }, /* (159) tagitem ::= MINUS FLOAT */
- { 242, -2 }, /* (160) tagitem ::= PLUS INTEGER */
- { 242, -2 }, /* (161) tagitem ::= PLUS FLOAT */
- { 240, -14 }, /* (162) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
- { 240, -3 }, /* (163) select ::= LP select RP */
- { 256, -1 }, /* (164) union ::= select */
- { 256, -4 }, /* (165) union ::= union UNION ALL select */
- { 192, -1 }, /* (166) cmd ::= union */
- { 240, -2 }, /* (167) select ::= SELECT selcollist */
- { 257, -2 }, /* (168) sclp ::= selcollist COMMA */
- { 257, 0 }, /* (169) sclp ::= */
- { 243, -4 }, /* (170) selcollist ::= sclp distinct expr as */
- { 243, -2 }, /* (171) selcollist ::= sclp STAR */
- { 260, -2 }, /* (172) as ::= AS ids */
- { 260, -1 }, /* (173) as ::= ids */
- { 260, 0 }, /* (174) as ::= */
- { 258, -1 }, /* (175) distinct ::= DISTINCT */
- { 258, 0 }, /* (176) distinct ::= */
- { 244, -2 }, /* (177) from ::= FROM tablelist */
- { 244, -2 }, /* (178) from ::= FROM sub */
- { 262, -3 }, /* (179) sub ::= LP union RP */
- { 262, -4 }, /* (180) sub ::= LP union RP ids */
- { 262, -6 }, /* (181) sub ::= sub COMMA LP union RP ids */
- { 261, -2 }, /* (182) tablelist ::= ids cpxName */
- { 261, -3 }, /* (183) tablelist ::= ids cpxName ids */
- { 261, -4 }, /* (184) tablelist ::= tablelist COMMA ids cpxName */
- { 261, -5 }, /* (185) tablelist ::= tablelist COMMA ids cpxName ids */
- { 263, -1 }, /* (186) tmvar ::= VARIABLE */
- { 246, -4 }, /* (187) interval_opt ::= INTERVAL LP tmvar RP */
- { 246, -6 }, /* (188) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
- { 246, 0 }, /* (189) interval_opt ::= */
- { 247, 0 }, /* (190) session_option ::= */
- { 247, -7 }, /* (191) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
- { 248, 0 }, /* (192) windowstate_option ::= */
- { 248, -4 }, /* (193) windowstate_option ::= STATE_WINDOW LP ids RP */
- { 249, 0 }, /* (194) fill_opt ::= */
- { 249, -6 }, /* (195) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- { 249, -4 }, /* (196) fill_opt ::= FILL LP ID RP */
- { 250, -4 }, /* (197) sliding_opt ::= SLIDING LP tmvar RP */
- { 250, 0 }, /* (198) sliding_opt ::= */
- { 252, 0 }, /* (199) orderby_opt ::= */
- { 252, -3 }, /* (200) orderby_opt ::= ORDER BY sortlist */
- { 264, -4 }, /* (201) sortlist ::= sortlist COMMA item sortorder */
- { 264, -2 }, /* (202) sortlist ::= item sortorder */
- { 266, -2 }, /* (203) item ::= ids cpxName */
- { 267, -1 }, /* (204) sortorder ::= ASC */
- { 267, -1 }, /* (205) sortorder ::= DESC */
- { 267, 0 }, /* (206) sortorder ::= */
- { 251, 0 }, /* (207) groupby_opt ::= */
- { 251, -3 }, /* (208) groupby_opt ::= GROUP BY grouplist */
- { 268, -3 }, /* (209) grouplist ::= grouplist COMMA item */
- { 268, -1 }, /* (210) grouplist ::= item */
- { 253, 0 }, /* (211) having_opt ::= */
- { 253, -2 }, /* (212) having_opt ::= HAVING expr */
- { 255, 0 }, /* (213) limit_opt ::= */
- { 255, -2 }, /* (214) limit_opt ::= LIMIT signed */
- { 255, -4 }, /* (215) limit_opt ::= LIMIT signed OFFSET signed */
- { 255, -4 }, /* (216) limit_opt ::= LIMIT signed COMMA signed */
- { 254, 0 }, /* (217) slimit_opt ::= */
- { 254, -2 }, /* (218) slimit_opt ::= SLIMIT signed */
- { 254, -4 }, /* (219) slimit_opt ::= SLIMIT signed SOFFSET signed */
- { 254, -4 }, /* (220) slimit_opt ::= SLIMIT signed COMMA signed */
- { 245, 0 }, /* (221) where_opt ::= */
- { 245, -2 }, /* (222) where_opt ::= WHERE expr */
- { 259, -3 }, /* (223) expr ::= LP expr RP */
- { 259, -1 }, /* (224) expr ::= ID */
- { 259, -3 }, /* (225) expr ::= ID DOT ID */
- { 259, -3 }, /* (226) expr ::= ID DOT STAR */
- { 259, -1 }, /* (227) expr ::= INTEGER */
- { 259, -2 }, /* (228) expr ::= MINUS INTEGER */
- { 259, -2 }, /* (229) expr ::= PLUS INTEGER */
- { 259, -1 }, /* (230) expr ::= FLOAT */
- { 259, -2 }, /* (231) expr ::= MINUS FLOAT */
- { 259, -2 }, /* (232) expr ::= PLUS FLOAT */
- { 259, -1 }, /* (233) expr ::= STRING */
- { 259, -1 }, /* (234) expr ::= NOW */
- { 259, -1 }, /* (235) expr ::= VARIABLE */
- { 259, -2 }, /* (236) expr ::= PLUS VARIABLE */
- { 259, -2 }, /* (237) expr ::= MINUS VARIABLE */
- { 259, -1 }, /* (238) expr ::= BOOL */
- { 259, -1 }, /* (239) expr ::= NULL */
- { 259, -4 }, /* (240) expr ::= ID LP exprlist RP */
- { 259, -4 }, /* (241) expr ::= ID LP STAR RP */
- { 259, -3 }, /* (242) expr ::= expr IS NULL */
- { 259, -4 }, /* (243) expr ::= expr IS NOT NULL */
- { 259, -3 }, /* (244) expr ::= expr LT expr */
- { 259, -3 }, /* (245) expr ::= expr GT expr */
- { 259, -3 }, /* (246) expr ::= expr LE expr */
- { 259, -3 }, /* (247) expr ::= expr GE expr */
- { 259, -3 }, /* (248) expr ::= expr NE expr */
- { 259, -3 }, /* (249) expr ::= expr EQ expr */
- { 259, -5 }, /* (250) expr ::= expr BETWEEN expr AND expr */
- { 259, -3 }, /* (251) expr ::= expr AND expr */
- { 259, -3 }, /* (252) expr ::= expr OR expr */
- { 259, -3 }, /* (253) expr ::= expr PLUS expr */
- { 259, -3 }, /* (254) expr ::= expr MINUS expr */
- { 259, -3 }, /* (255) expr ::= expr STAR expr */
- { 259, -3 }, /* (256) expr ::= expr SLASH expr */
- { 259, -3 }, /* (257) expr ::= expr REM expr */
- { 259, -3 }, /* (258) expr ::= expr LIKE expr */
- { 259, -5 }, /* (259) expr ::= expr IN LP exprlist RP */
- { 200, -3 }, /* (260) exprlist ::= exprlist COMMA expritem */
- { 200, -1 }, /* (261) exprlist ::= expritem */
- { 269, -1 }, /* (262) expritem ::= expr */
- { 269, 0 }, /* (263) expritem ::= */
- { 192, -3 }, /* (264) cmd ::= RESET QUERY CACHE */
- { 192, -3 }, /* (265) cmd ::= SYNCDB ids REPLICA */
- { 192, -7 }, /* (266) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- { 192, -7 }, /* (267) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- { 192, -7 }, /* (268) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
- { 192, -7 }, /* (269) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- { 192, -7 }, /* (270) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- { 192, -8 }, /* (271) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- { 192, -9 }, /* (272) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- { 192, -7 }, /* (273) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
- { 192, -7 }, /* (274) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- { 192, -7 }, /* (275) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- { 192, -7 }, /* (276) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
- { 192, -7 }, /* (277) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- { 192, -7 }, /* (278) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- { 192, -8 }, /* (279) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- { 192, -9 }, /* (280) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
- { 192, -7 }, /* (281) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
- { 192, -3 }, /* (282) cmd ::= KILL CONNECTION INTEGER */
- { 192, -5 }, /* (283) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- { 192, -5 }, /* (284) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ { 197, -2 }, /* (122) alter_db_optr ::= alter_db_optr update */
+ { 197, -2 }, /* (123) alter_db_optr ::= alter_db_optr cachelast */
+ { 198, -1 }, /* (124) alter_topic_optr ::= alter_db_optr */
+ { 198, -2 }, /* (125) alter_topic_optr ::= alter_topic_optr partitions */
+ { 231, -1 }, /* (126) typename ::= ids */
+ { 231, -4 }, /* (127) typename ::= ids LP signed RP */
+ { 231, -2 }, /* (128) typename ::= ids UNSIGNED */
+ { 232, -1 }, /* (129) signed ::= INTEGER */
+ { 232, -2 }, /* (130) signed ::= PLUS INTEGER */
+ { 232, -2 }, /* (131) signed ::= MINUS INTEGER */
+ { 192, -3 }, /* (132) cmd ::= CREATE TABLE create_table_args */
+ { 192, -3 }, /* (133) cmd ::= CREATE TABLE create_stable_args */
+ { 192, -3 }, /* (134) cmd ::= CREATE STABLE create_stable_args */
+ { 192, -3 }, /* (135) cmd ::= CREATE TABLE create_table_list */
+ { 235, -1 }, /* (136) create_table_list ::= create_from_stable */
+ { 235, -2 }, /* (137) create_table_list ::= create_table_list create_from_stable */
+ { 233, -6 }, /* (138) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ { 234, -10 }, /* (139) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ { 236, -10 }, /* (140) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ { 236, -13 }, /* (141) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+ { 239, -3 }, /* (142) tagNamelist ::= tagNamelist COMMA ids */
+ { 239, -1 }, /* (143) tagNamelist ::= ids */
+ { 233, -5 }, /* (144) create_table_args ::= ifnotexists ids cpxName AS select */
+ { 237, -3 }, /* (145) columnlist ::= columnlist COMMA column */
+ { 237, -1 }, /* (146) columnlist ::= column */
+ { 241, -2 }, /* (147) column ::= ids typename */
+ { 238, -3 }, /* (148) tagitemlist ::= tagitemlist COMMA tagitem */
+ { 238, -1 }, /* (149) tagitemlist ::= tagitem */
+ { 242, -1 }, /* (150) tagitem ::= INTEGER */
+ { 242, -1 }, /* (151) tagitem ::= FLOAT */
+ { 242, -1 }, /* (152) tagitem ::= STRING */
+ { 242, -1 }, /* (153) tagitem ::= BOOL */
+ { 242, -1 }, /* (154) tagitem ::= NULL */
+ { 242, -1 }, /* (155) tagitem ::= NOW */
+ { 242, -2 }, /* (156) tagitem ::= MINUS INTEGER */
+ { 242, -2 }, /* (157) tagitem ::= MINUS FLOAT */
+ { 242, -2 }, /* (158) tagitem ::= PLUS INTEGER */
+ { 242, -2 }, /* (159) tagitem ::= PLUS FLOAT */
+ { 240, -14 }, /* (160) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
+ { 240, -3 }, /* (161) select ::= LP select RP */
+ { 256, -1 }, /* (162) union ::= select */
+ { 256, -4 }, /* (163) union ::= union UNION ALL select */
+ { 192, -1 }, /* (164) cmd ::= union */
+ { 240, -2 }, /* (165) select ::= SELECT selcollist */
+ { 257, -2 }, /* (166) sclp ::= selcollist COMMA */
+ { 257, 0 }, /* (167) sclp ::= */
+ { 243, -4 }, /* (168) selcollist ::= sclp distinct expr as */
+ { 243, -2 }, /* (169) selcollist ::= sclp STAR */
+ { 260, -2 }, /* (170) as ::= AS ids */
+ { 260, -1 }, /* (171) as ::= ids */
+ { 260, 0 }, /* (172) as ::= */
+ { 258, -1 }, /* (173) distinct ::= DISTINCT */
+ { 258, 0 }, /* (174) distinct ::= */
+ { 244, -2 }, /* (175) from ::= FROM tablelist */
+ { 244, -2 }, /* (176) from ::= FROM sub */
+ { 262, -3 }, /* (177) sub ::= LP union RP */
+ { 262, -4 }, /* (178) sub ::= LP union RP ids */
+ { 262, -6 }, /* (179) sub ::= sub COMMA LP union RP ids */
+ { 261, -2 }, /* (180) tablelist ::= ids cpxName */
+ { 261, -3 }, /* (181) tablelist ::= ids cpxName ids */
+ { 261, -4 }, /* (182) tablelist ::= tablelist COMMA ids cpxName */
+ { 261, -5 }, /* (183) tablelist ::= tablelist COMMA ids cpxName ids */
+ { 263, -1 }, /* (184) tmvar ::= VARIABLE */
+ { 246, -4 }, /* (185) interval_opt ::= INTERVAL LP tmvar RP */
+ { 246, -6 }, /* (186) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
+ { 246, 0 }, /* (187) interval_opt ::= */
+ { 247, 0 }, /* (188) session_option ::= */
+ { 247, -7 }, /* (189) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+ { 248, 0 }, /* (190) windowstate_option ::= */
+ { 248, -4 }, /* (191) windowstate_option ::= STATE_WINDOW LP ids RP */
+ { 249, 0 }, /* (192) fill_opt ::= */
+ { 249, -6 }, /* (193) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ { 249, -4 }, /* (194) fill_opt ::= FILL LP ID RP */
+ { 250, -4 }, /* (195) sliding_opt ::= SLIDING LP tmvar RP */
+ { 250, 0 }, /* (196) sliding_opt ::= */
+ { 253, 0 }, /* (197) orderby_opt ::= */
+ { 253, -3 }, /* (198) orderby_opt ::= ORDER BY sortlist */
+ { 264, -4 }, /* (199) sortlist ::= sortlist COMMA item sortorder */
+ { 264, -2 }, /* (200) sortlist ::= item sortorder */
+ { 266, -2 }, /* (201) item ::= ids cpxName */
+ { 267, -1 }, /* (202) sortorder ::= ASC */
+ { 267, -1 }, /* (203) sortorder ::= DESC */
+ { 267, 0 }, /* (204) sortorder ::= */
+ { 251, 0 }, /* (205) groupby_opt ::= */
+ { 251, -3 }, /* (206) groupby_opt ::= GROUP BY grouplist */
+ { 268, -3 }, /* (207) grouplist ::= grouplist COMMA item */
+ { 268, -1 }, /* (208) grouplist ::= item */
+ { 252, 0 }, /* (209) having_opt ::= */
+ { 252, -2 }, /* (210) having_opt ::= HAVING expr */
+ { 255, 0 }, /* (211) limit_opt ::= */
+ { 255, -2 }, /* (212) limit_opt ::= LIMIT signed */
+ { 255, -4 }, /* (213) limit_opt ::= LIMIT signed OFFSET signed */
+ { 255, -4 }, /* (214) limit_opt ::= LIMIT signed COMMA signed */
+ { 254, 0 }, /* (215) slimit_opt ::= */
+ { 254, -2 }, /* (216) slimit_opt ::= SLIMIT signed */
+ { 254, -4 }, /* (217) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ { 254, -4 }, /* (218) slimit_opt ::= SLIMIT signed COMMA signed */
+ { 245, 0 }, /* (219) where_opt ::= */
+ { 245, -2 }, /* (220) where_opt ::= WHERE expr */
+ { 259, -3 }, /* (221) expr ::= LP expr RP */
+ { 259, -1 }, /* (222) expr ::= ID */
+ { 259, -3 }, /* (223) expr ::= ID DOT ID */
+ { 259, -3 }, /* (224) expr ::= ID DOT STAR */
+ { 259, -1 }, /* (225) expr ::= INTEGER */
+ { 259, -2 }, /* (226) expr ::= MINUS INTEGER */
+ { 259, -2 }, /* (227) expr ::= PLUS INTEGER */
+ { 259, -1 }, /* (228) expr ::= FLOAT */
+ { 259, -2 }, /* (229) expr ::= MINUS FLOAT */
+ { 259, -2 }, /* (230) expr ::= PLUS FLOAT */
+ { 259, -1 }, /* (231) expr ::= STRING */
+ { 259, -1 }, /* (232) expr ::= NOW */
+ { 259, -1 }, /* (233) expr ::= VARIABLE */
+ { 259, -2 }, /* (234) expr ::= PLUS VARIABLE */
+ { 259, -2 }, /* (235) expr ::= MINUS VARIABLE */
+ { 259, -1 }, /* (236) expr ::= BOOL */
+ { 259, -1 }, /* (237) expr ::= NULL */
+ { 259, -4 }, /* (238) expr ::= ID LP exprlist RP */
+ { 259, -4 }, /* (239) expr ::= ID LP STAR RP */
+ { 259, -3 }, /* (240) expr ::= expr IS NULL */
+ { 259, -4 }, /* (241) expr ::= expr IS NOT NULL */
+ { 259, -3 }, /* (242) expr ::= expr LT expr */
+ { 259, -3 }, /* (243) expr ::= expr GT expr */
+ { 259, -3 }, /* (244) expr ::= expr LE expr */
+ { 259, -3 }, /* (245) expr ::= expr GE expr */
+ { 259, -3 }, /* (246) expr ::= expr NE expr */
+ { 259, -3 }, /* (247) expr ::= expr EQ expr */
+ { 259, -5 }, /* (248) expr ::= expr BETWEEN expr AND expr */
+ { 259, -3 }, /* (249) expr ::= expr AND expr */
+ { 259, -3 }, /* (250) expr ::= expr OR expr */
+ { 259, -3 }, /* (251) expr ::= expr PLUS expr */
+ { 259, -3 }, /* (252) expr ::= expr MINUS expr */
+ { 259, -3 }, /* (253) expr ::= expr STAR expr */
+ { 259, -3 }, /* (254) expr ::= expr SLASH expr */
+ { 259, -3 }, /* (255) expr ::= expr REM expr */
+ { 259, -3 }, /* (256) expr ::= expr LIKE expr */
+ { 259, -5 }, /* (257) expr ::= expr IN LP exprlist RP */
+ { 200, -3 }, /* (258) exprlist ::= exprlist COMMA expritem */
+ { 200, -1 }, /* (259) exprlist ::= expritem */
+ { 269, -1 }, /* (260) expritem ::= expr */
+ { 269, 0 }, /* (261) expritem ::= */
+ { 192, -3 }, /* (262) cmd ::= RESET QUERY CACHE */
+ { 192, -3 }, /* (263) cmd ::= SYNCDB ids REPLICA */
+ { 192, -7 }, /* (264) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ { 192, -7 }, /* (265) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ { 192, -7 }, /* (266) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+ { 192, -7 }, /* (267) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ { 192, -7 }, /* (268) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ { 192, -8 }, /* (269) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ { 192, -9 }, /* (270) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ { 192, -7 }, /* (271) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+ { 192, -7 }, /* (272) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ { 192, -7 }, /* (273) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ { 192, -7 }, /* (274) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+ { 192, -7 }, /* (275) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ { 192, -7 }, /* (276) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ { 192, -8 }, /* (277) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ { 192, -9 }, /* (278) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+ { 192, -7 }, /* (279) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+ { 192, -3 }, /* (280) cmd ::= KILL CONNECTION INTEGER */
+ { 192, -5 }, /* (281) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ { 192, -5 }, /* (282) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -2166,9 +2162,9 @@ static void yy_reduce(
/********** Begin reduce actions **********************************************/
YYMINORTYPE yylhsminor;
case 0: /* program ::= cmd */
- case 134: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==134);
- case 135: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==135);
- case 136: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==136);
+ case 132: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==132);
+ case 133: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==133);
+ case 134: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==134);
{}
break;
case 1: /* cmd ::= SHOW DATABASES */
@@ -2359,7 +2355,7 @@ static void yy_reduce(
break;
case 52: /* ifexists ::= */
case 54: /* ifnotexists ::= */ yytestcase(yyruleno==54);
- case 176: /* distinct ::= */ yytestcase(yyruleno==176);
+ case 174: /* distinct ::= */ yytestcase(yyruleno==174);
{ yymsp[1].minor.yy0.n = 0;}
break;
case 53: /* ifnotexists ::= IF NOT EXISTS */
@@ -2415,20 +2411,20 @@ static void yy_reduce(
yymsp[-8].minor.yy151 = yylhsminor.yy151;
break;
case 79: /* intitemlist ::= intitemlist COMMA intitem */
- case 150: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==150);
+ case 148: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==148);
{ yylhsminor.yy441 = tVariantListAppend(yymsp[-2].minor.yy441, &yymsp[0].minor.yy506, -1); }
yymsp[-2].minor.yy441 = yylhsminor.yy441;
break;
case 80: /* intitemlist ::= intitem */
- case 151: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==151);
+ case 149: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==149);
{ yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1); }
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
case 81: /* intitem ::= INTEGER */
- case 152: /* tagitem ::= INTEGER */ yytestcase(yyruleno==152);
- case 153: /* tagitem ::= FLOAT */ yytestcase(yyruleno==153);
- case 154: /* tagitem ::= STRING */ yytestcase(yyruleno==154);
- case 155: /* tagitem ::= BOOL */ yytestcase(yyruleno==155);
+ case 150: /* tagitem ::= INTEGER */ yytestcase(yyruleno==150);
+ case 151: /* tagitem ::= FLOAT */ yytestcase(yyruleno==151);
+ case 152: /* tagitem ::= STRING */ yytestcase(yyruleno==152);
+ case 153: /* tagitem ::= BOOL */ yytestcase(yyruleno==153);
{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0); }
yymsp[0].minor.yy506 = yylhsminor.yy506;
break;
@@ -2491,12 +2487,10 @@ static void yy_reduce(
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 107: /* db_optr ::= db_optr wal */
- case 122: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==122);
{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 108: /* db_optr ::= db_optr fsync */
- case 123: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==123);
{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
@@ -2515,36 +2509,36 @@ static void yy_reduce(
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 112: /* db_optr ::= db_optr update */
- case 124: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==124);
+ case 122: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==122);
{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 113: /* db_optr ::= db_optr cachelast */
- case 125: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==125);
+ case 123: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==123);
{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 114: /* topic_optr ::= db_optr */
- case 126: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==126);
+ case 124: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==124);
{ yylhsminor.yy382 = yymsp[0].minor.yy382; yylhsminor.yy382.dbType = TSDB_DB_TYPE_TOPIC; }
yymsp[0].minor.yy382 = yylhsminor.yy382;
break;
case 115: /* topic_optr ::= topic_optr partitions */
- case 127: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==127);
+ case 125: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==125);
{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy382 = yylhsminor.yy382;
break;
case 116: /* alter_db_optr ::= */
{ setDefaultCreateDbOption(&yymsp[1].minor.yy382); yymsp[1].minor.yy382.dbType = TSDB_DB_TYPE_DEFAULT;}
break;
- case 128: /* typename ::= ids */
+ case 126: /* typename ::= ids */
{
yymsp[0].minor.yy0.type = 0;
tSetColumnType (&yylhsminor.yy343, &yymsp[0].minor.yy0);
}
yymsp[0].minor.yy343 = yylhsminor.yy343;
break;
- case 129: /* typename ::= ids LP signed RP */
+ case 127: /* typename ::= ids LP signed RP */
{
if (yymsp[-1].minor.yy369 <= 0) {
yymsp[-3].minor.yy0.type = 0;
@@ -2556,7 +2550,7 @@ static void yy_reduce(
}
yymsp[-3].minor.yy343 = yylhsminor.yy343;
break;
- case 130: /* typename ::= ids UNSIGNED */
+ case 128: /* typename ::= ids UNSIGNED */
{
yymsp[-1].minor.yy0.type = 0;
yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z);
@@ -2564,20 +2558,20 @@ static void yy_reduce(
}
yymsp[-1].minor.yy343 = yylhsminor.yy343;
break;
- case 131: /* signed ::= INTEGER */
+ case 129: /* signed ::= INTEGER */
{ yylhsminor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[0].minor.yy369 = yylhsminor.yy369;
break;
- case 132: /* signed ::= PLUS INTEGER */
+ case 130: /* signed ::= PLUS INTEGER */
{ yymsp[-1].minor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
break;
- case 133: /* signed ::= MINUS INTEGER */
+ case 131: /* signed ::= MINUS INTEGER */
{ yymsp[-1].minor.yy369 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
break;
- case 137: /* cmd ::= CREATE TABLE create_table_list */
+ case 135: /* cmd ::= CREATE TABLE create_table_list */
{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy182;}
break;
- case 138: /* create_table_list ::= create_from_stable */
+ case 136: /* create_table_list ::= create_from_stable */
{
SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql));
pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo));
@@ -2588,14 +2582,14 @@ static void yy_reduce(
}
yymsp[0].minor.yy182 = yylhsminor.yy182;
break;
- case 139: /* create_table_list ::= create_table_list create_from_stable */
+ case 137: /* create_table_list ::= create_table_list create_from_stable */
{
taosArrayPush(yymsp[-1].minor.yy182->childTableInfo, &yymsp[0].minor.yy456);
yylhsminor.yy182 = yymsp[-1].minor.yy182;
}
yymsp[-1].minor.yy182 = yylhsminor.yy182;
break;
- case 140: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ case 138: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
{
yylhsminor.yy182 = tSetCreateTableInfo(yymsp[-1].minor.yy441, NULL, NULL, TSQL_CREATE_TABLE);
setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE);
@@ -2605,7 +2599,7 @@ static void yy_reduce(
}
yymsp[-5].minor.yy182 = yylhsminor.yy182;
break;
- case 141: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ case 139: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
{
yylhsminor.yy182 = tSetCreateTableInfo(yymsp[-5].minor.yy441, yymsp[-1].minor.yy441, NULL, TSQL_CREATE_STABLE);
setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE);
@@ -2615,7 +2609,7 @@ static void yy_reduce(
}
yymsp[-9].minor.yy182 = yylhsminor.yy182;
break;
- case 142: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ case 140: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
@@ -2623,7 +2617,7 @@ static void yy_reduce(
}
yymsp[-9].minor.yy456 = yylhsminor.yy456;
break;
- case 143: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+ case 141: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
{
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n;
@@ -2631,15 +2625,15 @@ static void yy_reduce(
}
yymsp[-12].minor.yy456 = yylhsminor.yy456;
break;
- case 144: /* tagNamelist ::= tagNamelist COMMA ids */
+ case 142: /* tagNamelist ::= tagNamelist COMMA ids */
{taosArrayPush(yymsp[-2].minor.yy441, &yymsp[0].minor.yy0); yylhsminor.yy441 = yymsp[-2].minor.yy441; }
yymsp[-2].minor.yy441 = yylhsminor.yy441;
break;
- case 145: /* tagNamelist ::= ids */
+ case 143: /* tagNamelist ::= ids */
{yylhsminor.yy441 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy441, &yymsp[0].minor.yy0);}
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
- case 146: /* create_table_args ::= ifnotexists ids cpxName AS select */
+ case 144: /* create_table_args ::= ifnotexists ids cpxName AS select */
{
yylhsminor.yy182 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy236, TSQL_CREATE_STREAM);
setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE);
@@ -2649,32 +2643,32 @@ static void yy_reduce(
}
yymsp[-4].minor.yy182 = yylhsminor.yy182;
break;
- case 147: /* columnlist ::= columnlist COMMA column */
+ case 145: /* columnlist ::= columnlist COMMA column */
{taosArrayPush(yymsp[-2].minor.yy441, &yymsp[0].minor.yy343); yylhsminor.yy441 = yymsp[-2].minor.yy441; }
yymsp[-2].minor.yy441 = yylhsminor.yy441;
break;
- case 148: /* columnlist ::= column */
+ case 146: /* columnlist ::= column */
{yylhsminor.yy441 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy441, &yymsp[0].minor.yy343);}
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
- case 149: /* column ::= ids typename */
+ case 147: /* column ::= ids typename */
{
tSetColumnInfo(&yylhsminor.yy343, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy343);
}
yymsp[-1].minor.yy343 = yylhsminor.yy343;
break;
- case 156: /* tagitem ::= NULL */
+ case 154: /* tagitem ::= NULL */
{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0); }
yymsp[0].minor.yy506 = yylhsminor.yy506;
break;
- case 157: /* tagitem ::= NOW */
+ case 155: /* tagitem ::= NOW */
{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0);}
yymsp[0].minor.yy506 = yylhsminor.yy506;
break;
- case 158: /* tagitem ::= MINUS INTEGER */
- case 159: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==159);
- case 160: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==160);
- case 161: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==161);
+ case 156: /* tagitem ::= MINUS INTEGER */
+ case 157: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==157);
+ case 158: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==158);
+ case 159: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==159);
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type;
@@ -2683,142 +2677,142 @@ static void yy_reduce(
}
yymsp[-1].minor.yy506 = yylhsminor.yy506;
break;
- case 162: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
+ case 160: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
{
- yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy441, yymsp[-11].minor.yy244, yymsp[-10].minor.yy166, yymsp[-4].minor.yy441, yymsp[-3].minor.yy441, &yymsp[-9].minor.yy340, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy348, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy441, &yymsp[0].minor.yy414, &yymsp[-1].minor.yy414, yymsp[-2].minor.yy166);
+ yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy441, yymsp[-11].minor.yy244, yymsp[-10].minor.yy166, yymsp[-4].minor.yy441, yymsp[-2].minor.yy441, &yymsp[-9].minor.yy340, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy348, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy441, &yymsp[0].minor.yy414, &yymsp[-1].minor.yy414, yymsp[-3].minor.yy166);
}
yymsp[-13].minor.yy236 = yylhsminor.yy236;
break;
- case 163: /* select ::= LP select RP */
+ case 161: /* select ::= LP select RP */
{yymsp[-2].minor.yy236 = yymsp[-1].minor.yy236;}
break;
- case 164: /* union ::= select */
+ case 162: /* union ::= select */
{ yylhsminor.yy441 = setSubclause(NULL, yymsp[0].minor.yy236); }
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
- case 165: /* union ::= union UNION ALL select */
+ case 163: /* union ::= union UNION ALL select */
{ yylhsminor.yy441 = appendSelectClause(yymsp[-3].minor.yy441, yymsp[0].minor.yy236); }
yymsp[-3].minor.yy441 = yylhsminor.yy441;
break;
- case 166: /* cmd ::= union */
+ case 164: /* cmd ::= union */
{ setSqlInfo(pInfo, yymsp[0].minor.yy441, NULL, TSDB_SQL_SELECT); }
break;
- case 167: /* select ::= SELECT selcollist */
+ case 165: /* select ::= SELECT selcollist */
{
yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy441, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
yymsp[-1].minor.yy236 = yylhsminor.yy236;
break;
- case 168: /* sclp ::= selcollist COMMA */
+ case 166: /* sclp ::= selcollist COMMA */
{yylhsminor.yy441 = yymsp[-1].minor.yy441;}
yymsp[-1].minor.yy441 = yylhsminor.yy441;
break;
- case 169: /* sclp ::= */
- case 199: /* orderby_opt ::= */ yytestcase(yyruleno==199);
+ case 167: /* sclp ::= */
+ case 197: /* orderby_opt ::= */ yytestcase(yyruleno==197);
{yymsp[1].minor.yy441 = 0;}
break;
- case 170: /* selcollist ::= sclp distinct expr as */
+ case 168: /* selcollist ::= sclp distinct expr as */
{
yylhsminor.yy441 = tSqlExprListAppend(yymsp[-3].minor.yy441, yymsp[-1].minor.yy166, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
}
yymsp[-3].minor.yy441 = yylhsminor.yy441;
break;
- case 171: /* selcollist ::= sclp STAR */
+ case 169: /* selcollist ::= sclp STAR */
{
tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL);
yylhsminor.yy441 = tSqlExprListAppend(yymsp[-1].minor.yy441, pNode, 0, 0);
}
yymsp[-1].minor.yy441 = yylhsminor.yy441;
break;
- case 172: /* as ::= AS ids */
+ case 170: /* as ::= AS ids */
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
break;
- case 173: /* as ::= ids */
+ case 171: /* as ::= ids */
{ yylhsminor.yy0 = yymsp[0].minor.yy0; }
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 174: /* as ::= */
+ case 172: /* as ::= */
{ yymsp[1].minor.yy0.n = 0; }
break;
- case 175: /* distinct ::= DISTINCT */
+ case 173: /* distinct ::= DISTINCT */
{ yylhsminor.yy0 = yymsp[0].minor.yy0; }
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 177: /* from ::= FROM tablelist */
- case 178: /* from ::= FROM sub */ yytestcase(yyruleno==178);
+ case 175: /* from ::= FROM tablelist */
+ case 176: /* from ::= FROM sub */ yytestcase(yyruleno==176);
{yymsp[-1].minor.yy244 = yymsp[0].minor.yy244;}
break;
- case 179: /* sub ::= LP union RP */
+ case 177: /* sub ::= LP union RP */
{yymsp[-2].minor.yy244 = addSubqueryElem(NULL, yymsp[-1].minor.yy441, NULL);}
break;
- case 180: /* sub ::= LP union RP ids */
+ case 178: /* sub ::= LP union RP ids */
{yymsp[-3].minor.yy244 = addSubqueryElem(NULL, yymsp[-2].minor.yy441, &yymsp[0].minor.yy0);}
break;
- case 181: /* sub ::= sub COMMA LP union RP ids */
+ case 179: /* sub ::= sub COMMA LP union RP ids */
{yylhsminor.yy244 = addSubqueryElem(yymsp[-5].minor.yy244, yymsp[-2].minor.yy441, &yymsp[0].minor.yy0);}
yymsp[-5].minor.yy244 = yylhsminor.yy244;
break;
- case 182: /* tablelist ::= ids cpxName */
+ case 180: /* tablelist ::= ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yylhsminor.yy244 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL);
}
yymsp[-1].minor.yy244 = yylhsminor.yy244;
break;
- case 183: /* tablelist ::= ids cpxName ids */
+ case 181: /* tablelist ::= ids cpxName ids */
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
yylhsminor.yy244 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
yymsp[-2].minor.yy244 = yylhsminor.yy244;
break;
- case 184: /* tablelist ::= tablelist COMMA ids cpxName */
+ case 182: /* tablelist ::= tablelist COMMA ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yylhsminor.yy244 = setTableNameList(yymsp[-3].minor.yy244, &yymsp[-1].minor.yy0, NULL);
}
yymsp[-3].minor.yy244 = yylhsminor.yy244;
break;
- case 185: /* tablelist ::= tablelist COMMA ids cpxName ids */
+ case 183: /* tablelist ::= tablelist COMMA ids cpxName ids */
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
yylhsminor.yy244 = setTableNameList(yymsp[-4].minor.yy244, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
yymsp[-4].minor.yy244 = yylhsminor.yy244;
break;
- case 186: /* tmvar ::= VARIABLE */
+ case 184: /* tmvar ::= VARIABLE */
{yylhsminor.yy0 = yymsp[0].minor.yy0;}
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 187: /* interval_opt ::= INTERVAL LP tmvar RP */
+ case 185: /* interval_opt ::= INTERVAL LP tmvar RP */
{yymsp[-3].minor.yy340.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy340.offset.n = 0;}
break;
- case 188: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
+ case 186: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
{yymsp[-5].minor.yy340.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy340.offset = yymsp[-1].minor.yy0;}
break;
- case 189: /* interval_opt ::= */
+ case 187: /* interval_opt ::= */
{memset(&yymsp[1].minor.yy340, 0, sizeof(yymsp[1].minor.yy340));}
break;
- case 190: /* session_option ::= */
+ case 188: /* session_option ::= */
{yymsp[1].minor.yy259.col.n = 0; yymsp[1].minor.yy259.gap.n = 0;}
break;
- case 191: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+ case 189: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
yymsp[-6].minor.yy259.col = yymsp[-4].minor.yy0;
yymsp[-6].minor.yy259.gap = yymsp[-1].minor.yy0;
}
break;
- case 192: /* windowstate_option ::= */
+ case 190: /* windowstate_option ::= */
{ yymsp[1].minor.yy348.col.n = 0; yymsp[1].minor.yy348.col.z = NULL;}
break;
- case 193: /* windowstate_option ::= STATE_WINDOW LP ids RP */
+ case 191: /* windowstate_option ::= STATE_WINDOW LP ids RP */
{ yymsp[-3].minor.yy348.col = yymsp[-1].minor.yy0; }
break;
- case 194: /* fill_opt ::= */
+ case 192: /* fill_opt ::= */
{ yymsp[1].minor.yy441 = 0; }
break;
- case 195: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ case 193: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
{
tVariant A = {0};
toTSDBType(yymsp[-3].minor.yy0.type);
@@ -2828,34 +2822,34 @@ static void yy_reduce(
yymsp[-5].minor.yy441 = yymsp[-1].minor.yy441;
}
break;
- case 196: /* fill_opt ::= FILL LP ID RP */
+ case 194: /* fill_opt ::= FILL LP ID RP */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-3].minor.yy441 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
}
break;
- case 197: /* sliding_opt ::= SLIDING LP tmvar RP */
+ case 195: /* sliding_opt ::= SLIDING LP tmvar RP */
{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; }
break;
- case 198: /* sliding_opt ::= */
+ case 196: /* sliding_opt ::= */
{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; }
break;
- case 200: /* orderby_opt ::= ORDER BY sortlist */
+ case 198: /* orderby_opt ::= ORDER BY sortlist */
{yymsp[-2].minor.yy441 = yymsp[0].minor.yy441;}
break;
- case 201: /* sortlist ::= sortlist COMMA item sortorder */
+ case 199: /* sortlist ::= sortlist COMMA item sortorder */
{
yylhsminor.yy441 = tVariantListAppend(yymsp[-3].minor.yy441, &yymsp[-1].minor.yy506, yymsp[0].minor.yy112);
}
yymsp[-3].minor.yy441 = yylhsminor.yy441;
break;
- case 202: /* sortlist ::= item sortorder */
+ case 200: /* sortlist ::= item sortorder */
{
yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[-1].minor.yy506, yymsp[0].minor.yy112);
}
yymsp[-1].minor.yy441 = yylhsminor.yy441;
break;
- case 203: /* item ::= ids cpxName */
+ case 201: /* item ::= ids cpxName */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
@@ -2864,227 +2858,227 @@ static void yy_reduce(
}
yymsp[-1].minor.yy506 = yylhsminor.yy506;
break;
- case 204: /* sortorder ::= ASC */
+ case 202: /* sortorder ::= ASC */
{ yymsp[0].minor.yy112 = TSDB_ORDER_ASC; }
break;
- case 205: /* sortorder ::= DESC */
+ case 203: /* sortorder ::= DESC */
{ yymsp[0].minor.yy112 = TSDB_ORDER_DESC;}
break;
- case 206: /* sortorder ::= */
+ case 204: /* sortorder ::= */
{ yymsp[1].minor.yy112 = TSDB_ORDER_ASC; }
break;
- case 207: /* groupby_opt ::= */
+ case 205: /* groupby_opt ::= */
{ yymsp[1].minor.yy441 = 0;}
break;
- case 208: /* groupby_opt ::= GROUP BY grouplist */
+ case 206: /* groupby_opt ::= GROUP BY grouplist */
{ yymsp[-2].minor.yy441 = yymsp[0].minor.yy441;}
break;
- case 209: /* grouplist ::= grouplist COMMA item */
+ case 207: /* grouplist ::= grouplist COMMA item */
{
yylhsminor.yy441 = tVariantListAppend(yymsp[-2].minor.yy441, &yymsp[0].minor.yy506, -1);
}
yymsp[-2].minor.yy441 = yylhsminor.yy441;
break;
- case 210: /* grouplist ::= item */
+ case 208: /* grouplist ::= item */
{
yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1);
}
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
- case 211: /* having_opt ::= */
- case 221: /* where_opt ::= */ yytestcase(yyruleno==221);
- case 263: /* expritem ::= */ yytestcase(yyruleno==263);
+ case 209: /* having_opt ::= */
+ case 219: /* where_opt ::= */ yytestcase(yyruleno==219);
+ case 261: /* expritem ::= */ yytestcase(yyruleno==261);
{yymsp[1].minor.yy166 = 0;}
break;
- case 212: /* having_opt ::= HAVING expr */
- case 222: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==222);
+ case 210: /* having_opt ::= HAVING expr */
+ case 220: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==220);
{yymsp[-1].minor.yy166 = yymsp[0].minor.yy166;}
break;
- case 213: /* limit_opt ::= */
- case 217: /* slimit_opt ::= */ yytestcase(yyruleno==217);
+ case 211: /* limit_opt ::= */
+ case 215: /* slimit_opt ::= */ yytestcase(yyruleno==215);
{yymsp[1].minor.yy414.limit = -1; yymsp[1].minor.yy414.offset = 0;}
break;
- case 214: /* limit_opt ::= LIMIT signed */
- case 218: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==218);
+ case 212: /* limit_opt ::= LIMIT signed */
+ case 216: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==216);
{yymsp[-1].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-1].minor.yy414.offset = 0;}
break;
- case 215: /* limit_opt ::= LIMIT signed OFFSET signed */
+ case 213: /* limit_opt ::= LIMIT signed OFFSET signed */
{ yymsp[-3].minor.yy414.limit = yymsp[-2].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[0].minor.yy369;}
break;
- case 216: /* limit_opt ::= LIMIT signed COMMA signed */
+ case 214: /* limit_opt ::= LIMIT signed COMMA signed */
{ yymsp[-3].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[-2].minor.yy369;}
break;
- case 219: /* slimit_opt ::= SLIMIT signed SOFFSET signed */
+ case 217: /* slimit_opt ::= SLIMIT signed SOFFSET signed */
{yymsp[-3].minor.yy414.limit = yymsp[-2].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[0].minor.yy369;}
break;
- case 220: /* slimit_opt ::= SLIMIT signed COMMA signed */
+ case 218: /* slimit_opt ::= SLIMIT signed COMMA signed */
{yymsp[-3].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[-2].minor.yy369;}
break;
- case 223: /* expr ::= LP expr RP */
-{yylhsminor.yy166 = yymsp[-1].minor.yy166; yylhsminor.yy166->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy166->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
+ case 221: /* expr ::= LP expr RP */
+{yylhsminor.yy166 = yymsp[-1].minor.yy166; yylhsminor.yy166->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy166->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 224: /* expr ::= ID */
+ case 222: /* expr ::= ID */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 225: /* expr ::= ID DOT ID */
+ case 223: /* expr ::= ID DOT ID */
{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 226: /* expr ::= ID DOT STAR */
+ case 224: /* expr ::= ID DOT STAR */
{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 227: /* expr ::= INTEGER */
+ case 225: /* expr ::= INTEGER */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 228: /* expr ::= MINUS INTEGER */
- case 229: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==229);
+ case 226: /* expr ::= MINUS INTEGER */
+ case 227: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==227);
{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);}
yymsp[-1].minor.yy166 = yylhsminor.yy166;
break;
- case 230: /* expr ::= FLOAT */
+ case 228: /* expr ::= FLOAT */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 231: /* expr ::= MINUS FLOAT */
- case 232: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==232);
+ case 229: /* expr ::= MINUS FLOAT */
+ case 230: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==230);
{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);}
yymsp[-1].minor.yy166 = yylhsminor.yy166;
break;
- case 233: /* expr ::= STRING */
+ case 231: /* expr ::= STRING */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 234: /* expr ::= NOW */
+ case 232: /* expr ::= NOW */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); }
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 235: /* expr ::= VARIABLE */
+ case 233: /* expr ::= VARIABLE */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 236: /* expr ::= PLUS VARIABLE */
- case 237: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==237);
+ case 234: /* expr ::= PLUS VARIABLE */
+ case 235: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==235);
{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);}
yymsp[-1].minor.yy166 = yylhsminor.yy166;
break;
- case 238: /* expr ::= BOOL */
+ case 236: /* expr ::= BOOL */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 239: /* expr ::= NULL */
+ case 237: /* expr ::= NULL */
{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 240: /* expr ::= ID LP exprlist RP */
+ case 238: /* expr ::= ID LP exprlist RP */
{ yylhsminor.yy166 = tSqlExprCreateFunction(yymsp[-1].minor.yy441, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
yymsp[-3].minor.yy166 = yylhsminor.yy166;
break;
- case 241: /* expr ::= ID LP STAR RP */
+ case 239: /* expr ::= ID LP STAR RP */
{ yylhsminor.yy166 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
yymsp[-3].minor.yy166 = yylhsminor.yy166;
break;
- case 242: /* expr ::= expr IS NULL */
+ case 240: /* expr ::= expr IS NULL */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, NULL, TK_ISNULL);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 243: /* expr ::= expr IS NOT NULL */
+ case 241: /* expr ::= expr IS NOT NULL */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-3].minor.yy166, NULL, TK_NOTNULL);}
yymsp[-3].minor.yy166 = yylhsminor.yy166;
break;
- case 244: /* expr ::= expr LT expr */
+ case 242: /* expr ::= expr LT expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LT);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 245: /* expr ::= expr GT expr */
+ case 243: /* expr ::= expr GT expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_GT);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 246: /* expr ::= expr LE expr */
+ case 244: /* expr ::= expr LE expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LE);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 247: /* expr ::= expr GE expr */
+ case 245: /* expr ::= expr GE expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_GE);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 248: /* expr ::= expr NE expr */
+ case 246: /* expr ::= expr NE expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_NE);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 249: /* expr ::= expr EQ expr */
+ case 247: /* expr ::= expr EQ expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_EQ);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 250: /* expr ::= expr BETWEEN expr AND expr */
+ case 248: /* expr ::= expr BETWEEN expr AND expr */
{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy166); yylhsminor.yy166 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy166, yymsp[-2].minor.yy166, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy166, TK_LE), TK_AND);}
yymsp[-4].minor.yy166 = yylhsminor.yy166;
break;
- case 251: /* expr ::= expr AND expr */
+ case 249: /* expr ::= expr AND expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_AND);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 252: /* expr ::= expr OR expr */
+ case 250: /* expr ::= expr OR expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_OR); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 253: /* expr ::= expr PLUS expr */
+ case 251: /* expr ::= expr PLUS expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_PLUS); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 254: /* expr ::= expr MINUS expr */
+ case 252: /* expr ::= expr MINUS expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_MINUS); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 255: /* expr ::= expr STAR expr */
+ case 253: /* expr ::= expr STAR expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_STAR); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 256: /* expr ::= expr SLASH expr */
+ case 254: /* expr ::= expr SLASH expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_DIVIDE);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 257: /* expr ::= expr REM expr */
+ case 255: /* expr ::= expr REM expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_REM); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 258: /* expr ::= expr LIKE expr */
+ case 256: /* expr ::= expr LIKE expr */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LIKE); }
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
- case 259: /* expr ::= expr IN LP exprlist RP */
+ case 257: /* expr ::= expr IN LP exprlist RP */
{yylhsminor.yy166 = tSqlExprCreate(yymsp[-4].minor.yy166, (tSqlExpr*)yymsp[-1].minor.yy441, TK_IN); }
yymsp[-4].minor.yy166 = yylhsminor.yy166;
break;
- case 260: /* exprlist ::= exprlist COMMA expritem */
+ case 258: /* exprlist ::= exprlist COMMA expritem */
{yylhsminor.yy441 = tSqlExprListAppend(yymsp[-2].minor.yy441,yymsp[0].minor.yy166,0, 0);}
yymsp[-2].minor.yy441 = yylhsminor.yy441;
break;
- case 261: /* exprlist ::= expritem */
+ case 259: /* exprlist ::= expritem */
{yylhsminor.yy441 = tSqlExprListAppend(0,yymsp[0].minor.yy166,0, 0);}
yymsp[0].minor.yy441 = yylhsminor.yy441;
break;
- case 262: /* expritem ::= expr */
+ case 260: /* expritem ::= expr */
{yylhsminor.yy166 = yymsp[0].minor.yy166;}
yymsp[0].minor.yy166 = yylhsminor.yy166;
break;
- case 264: /* cmd ::= RESET QUERY CACHE */
+ case 262: /* cmd ::= RESET QUERY CACHE */
{ setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
break;
- case 265: /* cmd ::= SYNCDB ids REPLICA */
+ case 263: /* cmd ::= SYNCDB ids REPLICA */
{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);}
break;
- case 266: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ case 264: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 267: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ case 265: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3095,21 +3089,21 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 268: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+ case 266: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 269: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ case 267: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 270: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ case 268: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3120,7 +3114,7 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 271: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ case 269: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -3134,7 +3128,7 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 272: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ case 270: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
@@ -3146,21 +3140,21 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 273: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+ case 271: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 274: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ case 272: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 275: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ case 273: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3171,21 +3165,21 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 276: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+ case 274: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 277: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ case 275: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 278: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ case 276: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3196,7 +3190,7 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 279: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ case 277: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -3210,7 +3204,7 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 280: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+ case 278: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
@@ -3222,20 +3216,20 @@ static void yy_reduce(
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 281: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+ case 279: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 282: /* cmd ::= KILL CONNECTION INTEGER */
+ case 280: /* cmd ::= KILL CONNECTION INTEGER */
{setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);}
break;
- case 283: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ case 281: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);}
break;
- case 284: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ case 282: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);}
break;
default:
diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt
index bb7df70f4184b84bf6e180581072bcd9802fe20f..349d511f1570e3df835494ebd4e3e86d7795c873 100644
--- a/src/query/tests/CMakeLists.txt
+++ b/src/query/tests/CMakeLists.txt
@@ -1,10 +1,11 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11
diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt
index f94b4aeb6d21277b6b845587cd35a2c98e0bc0b0..14b77356baa4b87a201e6ff10e785db99cbd47a6 100644
--- a/src/rpc/CMakeLists.txt
+++ b/src/rpc/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index 605f7d2a326f9b2a66865d9ad0edc2987eb57f81..e958a8e5ec5b6542d609028ee052d21a9a84d397 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -1189,7 +1189,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte
}
rpcSendReqToServer(pRpc, pContext);
rpcFreeCont(rpcMsg.pCont);
- } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY) {
+ } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY || pHead->code == TSDB_CODE_DND_EXITING) {
pContext->code = pHead->code;
rpcProcessConnError(pContext, NULL);
rpcFreeCont(rpcMsg.pCont);
diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt
index c10cea6c9dd8c53ab8608c8a736795f2318059d8..a32ac9943d08fe00427ec58520809b4f04657315 100644
--- a/src/rpc/test/CMakeLists.txt
+++ b/src/rpc/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt
index 521f51ceb71245f96855d71e649e697eb7591df4..2cd84c7c3fff63a702d99d8b2dc45303f17528ef 100644
--- a/src/sync/CMakeLists.txt
+++ b/src/sync/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt
index f2b05ab2263c0d80bc870981f86933922de639e4..a5ab8191371ce97ecbaf9ef4dc8dbace6a6c4802 100644
--- a/src/sync/test/CMakeLists.txt
+++ b/src/sync/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX)
diff --git a/src/tfs/CMakeLists.txt b/src/tfs/CMakeLists.txt
index b435c84366fb47bd137b1c13bc98eab625bbcc66..7f956f07a21ed52363fc2072b01ad0853621712b 100644
--- a/src/tfs/CMakeLists.txt
+++ b/src/tfs/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt
index 8080a61a6c9b10b78d965a953765250e0a157fb6..c5b77df5a25f9f0b1e9294228520f171b9befddd 100644
--- a/src/tsdb/CMakeLists.txt
+++ b/src/tsdb/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h
index dcb5eadfab6c909ddd169fb3372799f7088c1903..b9d5431de6bc3864a4a13ea30356033de76da178 100644
--- a/src/tsdb/inc/tsdbFile.h
+++ b/src/tsdb/inc/tsdbFile.h
@@ -350,8 +350,8 @@ static FORCE_INLINE int tsdbCopyDFileSet(SDFileSet* pSrc, SDFileSet* pDest) {
}
static FORCE_INLINE void tsdbGetFidKeyRange(int days, int8_t precision, int fid, TSKEY* minKey, TSKEY* maxKey) {
- *minKey = fid * days * tsMsPerDay[precision];
- *maxKey = *minKey + days * tsMsPerDay[precision] - 1;
+ *minKey = fid * days * tsTickPerDay[precision];
+ *maxKey = *minKey + days * tsTickPerDay[precision] - 1;
}
static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet* pSet) {
diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c
index 82cc6f07f77300aadd554a7c22c0cf77308b3e53..75b072b063772f898753ac46ef8f05ccb490007c 100644
--- a/src/tsdb/src/tsdbCommit.c
+++ b/src/tsdb/src/tsdbCommit.c
@@ -17,9 +17,9 @@
#define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
- return (int)((key + 1) / tsMsPerDay[precision] / days - 1);
+ return (int)((key + 1) / tsTickPerDay[precision] / days - 1);
} else {
- return (int)((key / tsMsPerDay[precision] / days));
+ return (int)((key / tsTickPerDay[precision] / days));
}
}
@@ -363,9 +363,9 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
TSKEY minKey, midKey, maxKey, now;
now = taosGetTimestamp(pCfg->precision);
- minKey = now - pCfg->keep * tsMsPerDay[pCfg->precision];
- midKey = now - pCfg->keep2 * tsMsPerDay[pCfg->precision];
- maxKey = now - pCfg->keep1 * tsMsPerDay[pCfg->precision];
+ minKey = now - pCfg->keep * tsTickPerDay[pCfg->precision];
+ midKey = now - pCfg->keep2 * tsTickPerDay[pCfg->precision];
+ maxKey = now - pCfg->keep1 * tsTickPerDay[pCfg->precision];
pRtn->minKey = minKey;
pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->daysPerFile, pCfg->precision));
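
A hedged, standalone worked example of the TSDB_KEY_FID mapping used above: with millisecond precision (tsTickPerDay = 86,400,000) and daysPerFile = 10, every key inside the same 10-day span maps to the same file-set id. The timestamp below is illustrative only, not taken from a real deployment.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int64_t tickPerDay  = 86400000LL;  /* ms precision, stand-in for tsTickPerDay[precision] */
      int32_t daysPerFile = 10;

      int64_t key = 1700000000000LL;     /* an example timestamp in ms */
      int32_t fid = (int32_t)(key / tickPerDay / daysPerFile);  /* non-negative branch of TSDB_KEY_FID */

      printf("fid = %d\n", fid);         /* every key in the same 10-day window yields this fid */
      return 0;
    }
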
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index 54372ae8c28d91a72243256a74a8fb53c317eab2..e53d2826c76acb057020b05bfeba4e22cf128c51 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -771,7 +771,7 @@ int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) {
int64_t maxBufSize = 0;
SMFInfo minfo;
- taosHashEmpty(pfs->metaCache);
+ taosHashClear(pfs->metaCache);
// No meta file, just return
if (pfs->cstatus->pmf == NULL) return 0;
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index d44f8ec74874fa990992fc008671878088d872f0..09f3585b9d60151edc56995ba25ac063297dad5d 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -128,6 +128,8 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) {
tsdbSyncCommit(repo);
}
+ tsem_wait(&(pRepo->readyToCommit));
+
tsdbUnRefMemTable(pRepo, pRepo->mem);
tsdbUnRefMemTable(pRepo, pRepo->imem);
pRepo->mem = NULL;
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index 9d8b1ca7f2889f40b696f04a608dd166adf6eac6..50b5d321773f6d9d51d29759eb16a9b07bc7e1d8 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -632,8 +632,8 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
SSubmitBlkIter blkIter = {0};
SDataRow row = NULL;
TSKEY now = taosGetTimestamp(pRepo->config.precision);
- TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep;
- TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
+ TSKEY minKey = now - tsTickPerDay[pRepo->config.precision] * pRepo->config.keep;
+ TSKEY maxKey = now + tsTickPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 92a0d489b3b28820a20706318883bb7b6a280820..b17aa755a5ee18cfc61ab05fbbf36d9463cfc0dd 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -39,6 +39,12 @@ enum {
TSDB_QUERY_TYPE_LAST = 2,
};
+enum {
+ TSDB_CACHED_TYPE_NONE = 0,
+ TSDB_CACHED_TYPE_LASTROW = 1,
+ TSDB_CACHED_TYPE_LAST = 2,
+};
+
typedef struct SQueryFilePos {
int32_t fid;
int32_t slot;
@@ -280,9 +286,13 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
info.tableId.uid = info.pTableObj->tableId.uid;
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
- assert(info.lastKey >= pQueryHandle->window.skey);
+ if (info.lastKey == INT64_MIN || info.lastKey < pQueryHandle->window.skey) {
+ info.lastKey = pQueryHandle->window.skey;
+ }
+
+ assert(info.lastKey >= pQueryHandle->window.skey && info.lastKey <= pQueryHandle->window.ekey);
} else {
- assert(info.lastKey <= pQueryHandle->window.skey);
+ assert(info.lastKey >= pQueryHandle->window.ekey && info.lastKey <= pQueryHandle->window.skey);
}
taosArrayPush(pTableCheckInfo, &info);
@@ -339,14 +349,57 @@ static SArray* createCheckInfoFromCheckInfo(STableCheckInfo* pCheckInfo, TSKEY s
return pNew;
}
+static bool emptyQueryTimewindow(STsdbQueryHandle* pQueryHandle) {
+ assert(pQueryHandle != NULL);
+
+ STimeWindow* w = &pQueryHandle->window;
+ bool asc = ASCENDING_TRAVERSE(pQueryHandle->order);
+
+ return ((asc && w->skey > w->ekey) || (!asc && w->ekey > w->skey));
+}
+
+// Update the query time window according to the data time-to-live (TTL) information, so that expired data is not
+// returned to the client even if it falls inside the requested time range.
+static int64_t getEarliestValidTimestamp(STsdbRepo* pTsdb) {
+ STsdbCfg* pCfg = &pTsdb->config;
+
+ int64_t now = taosGetTimestamp(pCfg->precision);
+ return now - (tsTickPerDay[pCfg->precision] * pCfg->keep) + 1; // needs to add one tick
+}
+
+static void setQueryTimewindow(STsdbQueryHandle* pQueryHandle, STsdbQueryCond* pCond) {
+ pQueryHandle->window = pCond->twindow;
+
+ bool updateTs = false;
+ int64_t startTs = getEarliestValidTimestamp(pQueryHandle->pTsdb);
+ if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+ if (startTs > pQueryHandle->window.skey) {
+ pQueryHandle->window.skey = startTs;
+ pCond->twindow.skey = startTs;
+ updateTs = true;
+ }
+ } else {
+ if (startTs > pQueryHandle->window.ekey) {
+ pQueryHandle->window.ekey = startTs;
+ pCond->twindow.ekey = startTs;
+ updateTs = true;
+ }
+ }
+
+ if (updateTs) {
+ tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64
+ ", 0x%" PRIx64, pQueryHandle, pCond->twindow.skey, pCond->twindow.ekey, pQueryHandle->window.skey,
+ pQueryHandle->window.ekey, pQueryHandle->qId);
+ }
+}
+
static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, uint64_t qId, SMemRef* pMemRef) {
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
if (pQueryHandle == NULL) {
- goto out_of_memory;
+ goto _end;
}
pQueryHandle->order = pCond->order;
- pQueryHandle->window = pCond->twindow;
pQueryHandle->pTsdb = tsdb;
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
pQueryHandle->cur.fid = INT32_MIN;
@@ -354,36 +407,33 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
pQueryHandle->checkFiles = true;
pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qId = qId;
- pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
pQueryHandle->allocSize = 0;
pQueryHandle->locateStart = false;
pQueryHandle->pMemRef = pMemRef;
+ pQueryHandle->loadType = pCond->type;
+
+ pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
pQueryHandle->loadExternalRow = pCond->loadExternalRows;
pQueryHandle->currentLoadExternalRows = pCond->loadExternalRows;
- pQueryHandle->loadType = pCond->type;
-
if (tsdbInitReadH(&pQueryHandle->rhelper, (STsdbRepo*)tsdb) != 0) {
- goto out_of_memory;
+ goto _end;
}
assert(pCond != NULL && pMemRef != NULL);
- if (ASCENDING_TRAVERSE(pCond->order)) {
- assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
- } else {
- assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
- }
+ setQueryTimewindow(pQueryHandle, pCond);
+
if (pCond->numOfCols > 0) {
// allocate buffer in order to load data blocks from file
pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
if (pQueryHandle->statis == NULL) {
- goto out_of_memory;
+ goto _end;
}
- pQueryHandle->pColumns =
- taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array?
+ // todo: use list instead of array?
+ pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));
if (pQueryHandle->pColumns == NULL) {
- goto out_of_memory;
+ goto _end;
}
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
@@ -392,14 +442,16 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
colInfo.info = pCond->colList[i];
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
if (colInfo.pData == NULL) {
- goto out_of_memory;
+ goto _end;
}
+
taosArrayPush(pQueryHandle->pColumns, &colInfo);
pQueryHandle->statis[i].colId = colInfo.info.colId;
}
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
}
+
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);
@@ -407,7 +459,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
if (pQueryHandle->pDataCols == NULL) {
tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- goto out_of_memory;
+ goto _end;
}
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
@@ -415,7 +467,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
return (TsdbQueryHandleT) pQueryHandle;
- out_of_memory:
+ _end:
tsdbCleanupQueryHandle(pQueryHandle);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
@@ -423,6 +475,9 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, uint64_t qId, SMemRef* pRef) {
STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qId, pRef);
+ if (emptyQueryTimewindow(pQueryHandle)) {
+ return (TsdbQueryHandleT*) pQueryHandle;
+ }
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);
@@ -433,6 +488,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &psTable);
if (pQueryHandle->pTableCheckInfo == NULL) {
tsdbCleanupQueryHandle(pQueryHandle);
+ taosArrayDestroy(psTable);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
@@ -446,6 +502,15 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond) {
STsdbQueryHandle* pQueryHandle = queryHandle;
+ if (emptyQueryTimewindow(pQueryHandle)) {
+ if (pCond->order != pQueryHandle->order) {
+ pQueryHandle->order = pCond->order;
+ SWAP(pQueryHandle->window.skey, pQueryHandle->window.ekey, int64_t);
+ }
+
+ return;
+ }
+
pQueryHandle->order = pCond->order;
pQueryHandle->window = pCond->twindow;
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
@@ -511,8 +576,6 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
-
-
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
pCond->twindow = updateLastrowForEachGroup(groupList);
@@ -864,10 +927,10 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio
}
if (key < 0) {
- key -= (daysPerFile * tsMsPerDay[precision]);
+ key -= (daysPerFile * tsTickPerDay[precision]);
}
- int64_t fid = (int64_t)(key / (daysPerFile * tsMsPerDay[precision])); // set the starting fileId
+ int64_t fid = (int64_t)(key / (daysPerFile * tsTickPerDay[precision])); // set the starting fileId
if (fid < 0L && llabs(fid) > INT32_MAX) { // data value overflow for INT32
fid = INT32_MIN;
}
@@ -1171,8 +1234,9 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) {
SQueryFilePos* cur = &pQueryHandle->cur;
int32_t code = TSDB_CODE_SUCCESS;
+ bool asc = ASCENDING_TRAVERSE(pQueryHandle->order);
- if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+ if (asc) {
// query ended in/started from current block
if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) {
if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) {
@@ -1193,7 +1257,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock,
assert(pCheckInfo->lastKey <= pBlock->keyLast);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else { // the whole block is loaded in to buffer
- cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows - 1);
+ cur->pos = asc? 0:(pBlock->numOfRows - 1);
code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
} else { //desc order, query ended in current block
@@ -1213,7 +1277,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock,
assert(pCheckInfo->lastKey >= pBlock->keyFirst);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock);
} else {
- cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows-1);
+ cur->pos = asc? 0:(pBlock->numOfRows-1);
code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
}
@@ -2684,13 +2748,19 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
+ if (emptyQueryTimewindow(pQueryHandle)) {
+ tsdbDebug("%p query window not overlaps with the data set, no result returned, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
+ return false;
+ }
+
int64_t stime = taosGetTimestampUs();
int64_t elapsedTime = stime;
+ // TODO refactor: remove "type"
if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST) {
- if (pQueryHandle->cachelastrow == 1) {
+ if (pQueryHandle->cachelastrow == TSDB_CACHED_TYPE_LASTROW) {
return loadCachedLastRow(pQueryHandle);
- } else if (pQueryHandle->cachelastrow == 2) {
+ } else if (pQueryHandle->cachelastrow == TSDB_CACHED_TYPE_LAST) {
return loadCachedLast(pQueryHandle);
}
}
@@ -2803,6 +2873,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
SArray* psTable = NULL;
pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pCurrent, pSecQueryHandle->window.skey, &psTable);
if (pSecQueryHandle->pTableCheckInfo == NULL) {
+ taosArrayDestroy(psTable);
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto out_of_memory;
}
@@ -2896,7 +2967,7 @@ out:
}
bool isTsdbCacheLastRow(TsdbQueryHandleT* pQueryHandle) {
- return ((STsdbQueryHandle *)pQueryHandle)->cachelastrow > 0;
+ return ((STsdbQueryHandle *)pQueryHandle)->cachelastrow > TSDB_CACHED_TYPE_NONE;
}
int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList) {
@@ -2914,9 +2985,9 @@ int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *g
if (((STable*)pInfo->pTable)->lastRow) {
code = tsdbGetCachedLastRow(pInfo->pTable, NULL, &key);
if (code != TSDB_CODE_SUCCESS) {
- pQueryHandle->cachelastrow = 0;
+ pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_NONE;
} else {
- pQueryHandle->cachelastrow = 1;
+ pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LASTROW;
}
}
@@ -2936,12 +3007,11 @@ int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle) {
int32_t code = 0;
if (pQueryHandle->pTsdb && atomic_load_8(&pQueryHandle->pTsdb->hasCachedLastColumn)){
- pQueryHandle->cachelastrow = 2;
+ pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LAST;
}
// update the tsdb query time range
if (pQueryHandle->cachelastrow) {
- pQueryHandle->window = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = false;
pQueryHandle->activeIndex = -1; // start from -1
}
@@ -3548,7 +3618,6 @@ int32_t tsdbGetOneTableGroup(STsdbRepo* tsdb, uint64_t uid, TSKEY startKey, STab
taosArrayPush(group, &info);
taosArrayPush(pGroupInfo->pGroupList, &group);
-
return TSDB_CODE_SUCCESS;
_error:
@@ -3637,15 +3706,21 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
return;
}
- pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo);
pQueryHandle->pColumns = doFreeColumnInfoData(pQueryHandle->pColumns);
taosArrayDestroy(pQueryHandle->defaultLoadColumn);
tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis);
- // todo check error
- tsdbMayUnTakeMemSnapshot(pQueryHandle);
+ if (!emptyQueryTimewindow(pQueryHandle)) {
+ tsdbMayUnTakeMemSnapshot(pQueryHandle);
+ } else {
+ assert(pQueryHandle->pTableCheckInfo == NULL);
+ }
+
+ if (pQueryHandle->pTableCheckInfo != NULL) {
+ pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo);
+ }
tsdbDestroyReadH(&pQueryHandle->rhelper);
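
A hedged, standalone sketch of the retention clamp introduced by getEarliestValidTimestamp and setQueryTimewindow above: with keep = 10 days and millisecond precision, a query start key older than the retention window is moved forward to the earliest still-valid timestamp. The constants are illustrative, not taken from a real configuration.

    #include <stdint.h>
    #include <stdio.h>

    #define MS_PER_DAY 86400000LL  /* stand-in for tsTickPerDay[TSDB_TIME_PRECISION_MILLI] */

    int main(void) {
      int64_t now  = 1700000000000LL;                 /* example "current" timestamp in ms */
      int64_t keep = 10;                              /* days of data retained */
      int64_t earliest = now - keep * MS_PER_DAY + 1; /* same formula as getEarliestValidTimestamp */

      int64_t skey = now - 30LL * MS_PER_DAY;         /* the query asks for 30 days back */
      if (earliest > skey) skey = earliest;           /* same clamp as setQueryTimewindow (ascending case) */

      printf("clamped skey = %lld\n", (long long)skey);
      return 0;
    }
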
diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt
index e8a1d61ee52c6461e88f6cdc16069b2b6b523ab5..85b15c0a4fcae0885ebd6f9a4739924146568345 100644
--- a/src/util/CMakeLists.txt
+++ b/src/util/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h
index 57c69f7bebb0d172963d57ee512d799762eed9f5..2134bbe30ddfd4da719de6e37ffe1264a65ca8e4 100644
--- a/src/util/inc/hash.h
+++ b/src/util/inc/hash.h
@@ -140,7 +140,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param);
-void taosHashEmpty(SHashObj *pHashObj);
+void taosHashClear(SHashObj *pHashObj);
/**
* clean up hash table
diff --git a/src/util/inc/talgo.h b/src/util/inc/talgo.h
index 9e3692225b6413353bf269d9ba1fbc8651273eb5..4aa54306052bfe224d81ac90f8310de7ac85f8eb 100644
--- a/src/util/inc/talgo.h
+++ b/src/util/inc/talgo.h
@@ -34,6 +34,7 @@ typedef int (*__compar_fn_t) (const void *, const void *);
#define elePtrAt(base, size, idx) (void *)((char *)(base) + (size) * (idx))
typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void *param);
+typedef void (*__ext_swap_fn_t)(void *p1, void *p2, const void *param);
/**
* quick sort, with the compare function requiring additional parameters support
@@ -59,6 +60,38 @@ void taosqsort(void *src, size_t numOfElem, size_t size, const void* param, __ex
*/
void *taosbsearch(const void *key, const void *base, size_t nmemb, size_t size, __compar_fn_t fn, int flags);
+/**
+ * adjust heap
+ *
+ * @param base: the start address of array
+ * @param size: size of every item in array
+ * @param start: the first index
+ * @param end: the last index
+ * @param parcompar: parameters for compare function
+ * @param compar: user defined compare function
+ * @param parswap: parameters for swap function
+ * @param swap: user-defined swap function; the default swap function doswap is used when swap is NULL
+ * @param maxroot: whether the heap is a max-root heap
+ * @return
+ */
+void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot);
+
+/**
+ * sort heap to make sure it is a max/min root heap
+ *
+ * @param base: the start address of array
+ * @param size: size of every item in array
+ * @param len: the length of array
+ * @param parcompar: parameters for compare function
+ * @param compar: user defined compare function
+ * @param parswap: parameters for swap function
+ * @param swap: user-defined swap function; the default swap function doswap is used when swap is NULL
+ * @param maxroot: whether the heap is a max-root heap
+ * @return
+ */
+void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot);
+
+
#ifdef __cplusplus
}
#endif
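
A minimal usage sketch of the heap helpers declared above, assuming only the signatures in this header; the integer array and the cmpInt32 comparator are illustrative. Note that the accompanying implementation in talgo.c only heapifies the array (its full sort loop is commented out), so the call below builds a max-root heap rather than a fully sorted array.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "talgo.h"

    /* comparator following the __ext_compar_fn_t convention */
    static int32_t cmpInt32(const void *p1, const void *p2, const void *param) {
      (void)param;  /* no extra comparator state needed */
      int32_t a = *(const int32_t *)p1, b = *(const int32_t *)p2;
      return (a < b) ? -1 : (a > b) ? 1 : 0;
    }

    int main(void) {
      int32_t v[] = {7, 3, 9, 1, 5};
      int32_t n = sizeof(v) / sizeof(v[0]);

      /* NULL swap falls back to the byte-wise doswap inside talgo.c */
      taosheapsort(v, sizeof(int32_t), n, NULL, cmpInt32, NULL, NULL, true);

      for (int32_t i = 0; i < n; ++i) printf("%d ", v[i]);  /* v[0] is now the maximum */
      printf("\n");
      return 0;
    }
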
diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h
index fc7b6b85841065044a0897ee3ebaa4d7cb84e53b..63cadf39a3c7e968f615c96a61d848d57b1cc6d6 100644
--- a/src/util/inc/tarray.h
+++ b/src/util/inc/tarray.h
@@ -197,8 +197,21 @@ void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t compa
*/
char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int flags);
+
+/**
+ * sort the pointer data in the array
+ * @param pArray
+ * @param compar
+ * @param param
+ * @return
+ */
+
+void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param);
+
#ifdef __cplusplus
}
#endif
+
+
#endif // TDENGINE_TAOSARRAY_H
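
A hedged usage sketch of taosArraySortPWithExt for an array of C-string pointers; the taosArrayInit/taosArrayPush/taosArrayDestroy calls assume the existing tarray.h API. The comparator normalizes its result to -1/0/1, which is what the insertion-sort path in tarray.c compares against.

    #include <string.h>
    #include "tarray.h"
    #include "talgo.h"

    /* compare the stored char* pointers; result normalized to -1/0/1 */
    static int32_t cmpStr(const void *p1, const void *p2, const void *param) {
      (void)param;
      int r = strcmp((const char *)p1, (const char *)p2);
      return (r < 0) ? -1 : (r > 0) ? 1 : 0;
    }

    static void sortNames(void) {
      SArray *names = taosArrayInit(4, sizeof(char *));  /* array of pointers */
      char *a = "wal", *b = "tsdb", *c = "mnode";
      taosArrayPush(names, &a);
      taosArrayPush(names, &b);
      taosArrayPush(names, &c);

      /* arrays of <= 8 elements use insertion sort, larger ones use quicksort */
      taosArraySortPWithExt(names, cmpStr, NULL);

      taosArrayDestroy(names);
    }
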
diff --git a/src/util/inc/tsched.h b/src/util/inc/tsched.h
index 3e481cbc327b495975fb03bc4e4d850e4372f044..a1591512c1f87f524837a7986e3c8b3e14e25924 100644
--- a/src/util/inc/tsched.h
+++ b/src/util/inc/tsched.h
@@ -28,10 +28,41 @@ typedef struct SSchedMsg {
void *thandle;
} SSchedMsg;
-void *taosInitScheduler(int queueSize, int numOfThreads, const char *label);
-void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl);
-int taosScheduleTask(void *qhandle, SSchedMsg *pMsg);
-void taosCleanUpScheduler(void *param);
+/**
+ * Create a thread-safe ring-buffer based task queue and return the instance. A thread
+ * pool will be created to consume the messages in the queue.
+ * @param capacity the queue capacity
+ * @param numOfThreads the number of threads for the thread pool
+ * @param label the label of the queue
+ * @return the created queue scheduler
+ */
+void *taosInitScheduler(int capacity, int numOfThreads, const char *label);
+
+/**
+ * Create a thread-safe ring-buffer based task queue and return the instance.
+ * Same as taosInitScheduler, but it also prints the queue status every minute.
+ * @param capacity the queue capacity
+ * @param numOfThreads the number of threads for the thread pool
+ * @param label the label of the queue
+ * @param tmrCtrl the timer controller, tmr_ctrl_t*
+ * @return the created queue scheduler
+ */
+void *taosInitSchedulerWithInfo(int capacity, int numOfThreads, const char *label, void *tmrCtrl);
+
+/**
+ * Clean up the queue scheduler instance and free the memory.
+ * @param queueScheduler the queue scheduler to free
+ */
+void taosCleanUpScheduler(void *queueScheduler);
+
+/**
+ * Schedule a new task to run; the task is described by pMsg.
+ * The call may block if no thread is available to execute the task,
+ * which can happen when all worker threads are busy.
+ * @param queueScheduler the queue scheduler instance
+ * @param pMsg the message for the task
+ */
+void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
#ifdef __cplusplus
}
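
A hedged sketch of the task-queue API documented above: create a scheduler, post one task, and tear it down. Only the three function signatures come from this header; the fp and msg fields of SSchedMsg are assumptions based on how the struct is used elsewhere in the tree.

    #include <stdio.h>
    #include "tsched.h"

    static void printTask(SSchedMsg *pMsg) {
      printf("task says: %s\n", (const char *)pMsg->msg);
    }

    static void runOnce(void) {
      void *pSched = taosInitScheduler(64, 2, "demo");  /* 64-slot queue, 2 worker threads */
      if (pSched == NULL) return;

      SSchedMsg msg = {0};
      msg.fp  = printTask;             /* executed by one of the worker threads */
      msg.msg = (void *)"hello";

      taosScheduleTask(pSched, &msg);  /* may block while all worker threads are busy */
      taosCleanUpScheduler(pSched);
    }
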
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 27091b1fe81f68ee7cc414592af92cd0b9f079f7..c8bd79f118d41dbbf701cce1293dbbbabe4627bf 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -144,6 +144,14 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashObj *pHashObj, SHashEntry*
*/
static void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode);
+/**
+ * Check whether the hash table is empty or not.
+ *
+ * @param pHashObj the hash table object
+ * @return true if the hash table is empty, false otherwise
+ */
+static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj);
+
/**
* Get the next element in hash table for iterator
* @param pIter
@@ -195,7 +203,16 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp) {
}
}
-int32_t taosHashGetSize(const SHashObj *pHashObj) { return (int32_t)((pHashObj == NULL) ? 0 : pHashObj->size); }
+int32_t taosHashGetSize(const SHashObj *pHashObj) {
+ if (!pHashObj) {
+ return 0;
+ }
+ return (int32_t)atomic_load_64(&pHashObj->size);
+}
+
+static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) {
+ return taosHashGetSize(pHashObj) == 0;
+}
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) {
uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
@@ -281,7 +298,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) {
- if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) {
+ if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) {
return NULL;
}
@@ -338,7 +355,7 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) {
- if (pHashObj == NULL || pHashObj->size <= 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return -1;
}
@@ -405,7 +422,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
}
int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param) {
- if (pHashObj == NULL || pHashObj->size == 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return 0;
}
@@ -478,7 +495,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi
return 0;
}
-void taosHashEmpty(SHashObj *pHashObj) {
+void taosHashClear(SHashObj *pHashObj) {
if (pHashObj == NULL) {
return;
}
@@ -517,7 +534,7 @@ void taosHashCleanup(SHashObj *pHashObj) {
return;
}
- taosHashEmpty(pHashObj);
+ taosHashClear(pHashObj);
tfree(pHashObj->hashList);
// destroy mem block
@@ -535,7 +552,7 @@ void taosHashCleanup(SHashObj *pHashObj) {
// for profile only
int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) {
- if (pHashObj == NULL || pHashObj->size == 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return 0;
}
diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c
index 278683539e3247b4b6dcd43687ac281368a7d31d..54b7e00eb7dd6f31ac8c8e6afa89790846abac5b 100644
--- a/src/util/src/talgo.c
+++ b/src/util/src/talgo.c
@@ -225,3 +225,89 @@ void * taosbsearch(const void *key, const void *base, size_t nmemb, size_t size,
return NULL;
}
+
+void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot)
+{
+ int32_t parent;
+ int32_t child;
+ char *buf;
+
+ if (base && size > 0 && compar) {
+ parent = start;
+ child = 2 * parent + 1;
+
+ if (swap == NULL) {
+ buf = calloc(1, size);
+ if (buf == NULL) {
+ return;
+ }
+ }
+
+ if (maxroot) {
+ while (child <= end) {
+ if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) < 0) {
+ child++;
+ }
+
+ if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) > 0) {
+ break;
+ }
+
+ if (swap == NULL) {
+ doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf);
+ } else {
+ (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap);
+ }
+
+ parent = child;
+ child = 2 * parent + 1;
+ }
+ } else {
+ while (child <= end) {
+ if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) > 0) {
+ child++;
+ }
+
+ if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) < 0) {
+ break;
+ }
+
+ if (swap == NULL) {
+ doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf);
+ } else {
+ (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap);
+ }
+
+ parent = child;
+ child = 2 * parent + 1;
+ }
+ }
+
+ if (swap == NULL) {
+ tfree(buf);
+ }
+ }
+}
+
+void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot)
+{
+ int32_t i;
+
+ if (base && size > 0) {
+ for (i = len / 2 - 1; i >= 0; i--) {
+ taosheapadjust(base, size, i, len - 1, parcompar, compar, parswap, swap, maxroot);
+ }
+ }
+
+/*
+ char *buf = calloc(1, size);
+
+ for (i = len - 1; i > 0; i--) {
+ doswap(elePtrAt(base, size, 0), elePtrAt(base, size, i));
+ taosheapadjust(base, size, 0, i - 1, parcompar, compar, parswap, swap, maxroot);
+ }
+
+ tfree(buf);
+*/
+}
+
diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c
index 5e7d9d14da870174964ae56627de96b2955e03f0..fe529edaacea0e4bd32d94465240491863b55abe 100644
--- a/src/util/src/tarray.c
+++ b/src/util/src/tarray.c
@@ -15,6 +15,7 @@
#include "os.h"
#include "tarray.h"
+#include "talgo.h"
void* taosArrayInit(size_t size, size_t elemSize) {
assert(elemSize > 0);
@@ -249,4 +250,62 @@ char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t
return NULL;
}
return *(char**)p;
-}
\ No newline at end of file
+}
+
+static int taosArrayPartition(SArray *pArray, int i, int j, __ext_compar_fn_t fn, const void *userData) {
+ void* key = taosArrayGetP(pArray, i);
+ while (i < j) {
+ while (i < j && fn(taosArrayGetP(pArray, j), key, userData) >= 0) { j--; }
+ if (i < j) {
+ void *a = taosArrayGetP(pArray, j);
+ taosArraySet(pArray, i, &a);
+ }
+ while (i < j && fn(taosArrayGetP(pArray, i), key, userData) <= 0) { i++;}
+ if (i < j) {
+ void *a = taosArrayGetP(pArray, i);
+ taosArraySet(pArray, j, &a);
+ }
+ }
+ taosArraySet(pArray, i, &key);
+ return i;
+}
+
+static void taosArrayQuicksortHelper(SArray *pArray, int low, int high, __ext_compar_fn_t fn, const void *param) {
+ if (low < high) {
+ int idx = taosArrayPartition(pArray, low, high, fn, param);
+ taosArrayQuicksortHelper(pArray, low, idx - 1, fn, param);
+ taosArrayQuicksortHelper(pArray, idx + 1, high, fn, param);
+ }
+}
+
+static void taosArrayQuickSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) {
+ if (pArray->size <= 1) {
+ return;
+ }
+ taosArrayQuicksortHelper(pArray, 0, (int)(taosArrayGetSize(pArray) - 1), fn, param);
+}
+static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) {
+ if (pArray->size <= 1) {
+ return;
+ }
+ for (int i = 1; i <= pArray->size - 1; ++i) {
+ for (int j = i; j > 0; --j) {
+ if (fn(taosArrayGetP(pArray, j), taosArrayGetP(pArray, j - 1), param) == -1) {
+ void *a = taosArrayGetP(pArray, j);
+ void *b = taosArrayGetP(pArray, j - 1);
+ taosArraySet(pArray, j - 1, &a);
+ taosArraySet(pArray, j, &b);
+ } else {
+ break;
+ }
+ }
+ }
+}
+// order array
+void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param) {
+  if (taosArrayGetSize(pArray) > 8) {
+    taosArrayQuickSort(pArray, fn, param);
+  } else {
+    taosArrayInsertSort(pArray, fn, param);
+  }
+}
+//TODO(yihaoDeng) add order array
diff --git a/src/util/src/tbuffer.c b/src/util/src/tbuffer.c
index abfa35f42cc9798d3a8a7dd7d6b874705ace88ba..c06d1e59bd537bc009b462f0b17b9cdd00321d20 100644
--- a/src/util/src/tbuffer.c
+++ b/src/util/src/tbuffer.c
@@ -18,6 +18,24 @@
#include "exception.h"
#include "taoserror.h"
+typedef union Un4B {
+ uint32_t ui;
+ float f;
+} Un4B;
+#if __STDC_VERSION__ >= 201112L
+static_assert(sizeof(Un4B) == sizeof(uint32_t), "sizeof(Un4B) must equal to sizeof(uint32_t)");
+static_assert(sizeof(Un4B) == sizeof(float), "sizeof(Un4B) must equal to sizeof(float)");
+#endif
+
+typedef union Un8B {
+ uint64_t ull;
+ double d;
+} Un8B;
+#if __STDC_VERSION__ >= 201112L
+static_assert(sizeof(Un8B) == sizeof(uint64_t), "sizeof(Un8B) must equal to sizeof(uint64_t)");
+static_assert(sizeof(Un8B) == sizeof(double), "sizeof(Un8B) must equal to sizeof(double)");
+#endif
+
////////////////////////////////////////////////////////////////////////////////
// reader functions
@@ -175,13 +193,21 @@ uint64_t tbufReadUint64( SBufferReader* buf ) {
}
float tbufReadFloat( SBufferReader* buf ) {
- uint32_t ret = tbufReadUint32( buf );
- return *(float*)( &ret );
+ Un4B _un;
+ tbufReadToBuffer( buf, &_un, sizeof(_un) );
+ if( buf->endian ) {
+ _un.ui = ntohl( _un.ui );
+ }
+ return _un.f;
}
double tbufReadDouble(SBufferReader* buf) {
- uint64_t ret = tbufReadUint64( buf );
- return *(double*)( &ret );
+ Un8B _un;
+ tbufReadToBuffer( buf, &_un, sizeof(_un) );
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ return _un.d;
}
////////////////////////////////////////////////////////////////////////////////
@@ -381,17 +407,37 @@ void tbufWriteUint64At( SBufferWriter* buf, size_t pos, uint64_t data ) {
}
void tbufWriteFloat( SBufferWriter* buf, float data ) {
- tbufWriteUint32( buf, *(uint32_t*)(&data) );
+ Un4B _un;
+ _un.f = data;
+ if( buf->endian ) {
+ _un.ui = htonl( _un.ui );
+ }
+ tbufWrite( buf, &_un, sizeof(_un) );
}
void tbufWriteFloatAt( SBufferWriter* buf, size_t pos, float data ) {
- tbufWriteUint32At( buf, pos, *(uint32_t*)(&data) );
+ Un4B _un;
+ _un.f = data;
+ if( buf->endian ) {
+ _un.ui = htonl( _un.ui );
+ }
+ tbufWriteAt( buf, pos, &_un, sizeof(_un) );
}
void tbufWriteDouble( SBufferWriter* buf, double data ) {
- tbufWriteUint64( buf, *(uint64_t*)(&data) );
+ Un8B _un;
+ _un.d = data;
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ tbufWrite( buf, &_un, sizeof(_un) );
}
void tbufWriteDoubleAt( SBufferWriter* buf, size_t pos, double data ) {
- tbufWriteUint64At( buf, pos, *(uint64_t*)(&data) );
+ Un8B _un;
+ _un.d = data;
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ tbufWriteAt( buf, pos, &_un, sizeof(_un) );
}
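
A standalone illustration of why the writer/reader switch to the Un4B/Un8B unions: punning through a union replaces the *(uint32_t*)(&data) casts, and the integer view of the bits can be byte-swapped for the endian-aware buffers. This sketch only demonstrates the technique for the 4-byte case and is not tied to the SBuffer API; it assumes POSIX headers for htonl/ntohl.

    #include <arpa/inet.h>  /* htonl/ntohl, as used above for 4-byte values */
    #include <stdint.h>
    #include <stdio.h>

    typedef union {
      uint32_t ui;
      float    f;
    } Un4BDemo;

    static uint32_t floatToWireBE(float f) {
      Un4BDemo u;
      u.f = f;
      return htonl(u.ui);  /* big-endian on the wire, like tbufWriteFloat with endian set */
    }

    static float floatFromWireBE(uint32_t wire) {
      Un4BDemo u;
      u.ui = ntohl(wire);
      return u.f;
    }

    int main(void) {
      uint32_t wire = floatToWireBE(3.5f);
      printf("%f\n", floatFromWireBE(wire));  /* prints 3.500000 */
      return 0;
    }
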
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 442e83bb4f76499d7ce39792fc188d61536910c2..80071986d6d2396ef2aec8f7841b0897cb3d7b26 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -312,6 +312,9 @@ void taosReadGlobalLogCfg() {
#ifdef _TD_POWER_
printf("configDir:%s not there, use default value: /etc/power", configDir);
strcpy(configDir, "/etc/power");
+ #elif (_TD_TQ_ == true)
+ printf("configDir:%s not there, use default value: /etc/tq", configDir);
+ strcpy(configDir, "/etc/tq");
#else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
@@ -327,7 +330,8 @@ void taosReadGlobalLogCfg() {
printf("\nconfig file:%s not found, all variables are set to default\n", fileName);
return;
}
-
+
+ ssize_t _bytes = 0;
size_t len = 1024;
line = calloc(1, len);
@@ -337,7 +341,12 @@ void taosReadGlobalLogCfg() {
option = value = NULL;
olen = vlen = 0;
- tgetline(&line, &len, fp);
+ _bytes = tgetline(&line, &len, fp);
+ if (_bytes < 0) {
+   break;
+ }
+
line[len - 1] = 0;
paGetToken(line, &option, &olen);
@@ -373,7 +382,8 @@ bool taosReadGlobalCfg() {
return false;
}
}
-
+
+ ssize_t _bytes = 0;
size_t len = 1024;
line = calloc(1, len);
@@ -383,7 +393,12 @@ bool taosReadGlobalCfg() {
option = value = value2 = value3 = NULL;
olen = vlen = vlen2 = vlen3 = 0;
- tgetline(&line, &len, fp);
+ _bytes = tgetline(&line, &len, fp);
+ if (_bytes < 0) {
+   break;
+ }
+
line[len - 1] = 0;
paGetToken(line, &option, &olen);
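Aside: both config loops now stop as soon as tgetline reports failure instead of re-parsing a stale buffer. The same guard, shown with POSIX getline standing in for tgetline (error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    static void readConfigLines(FILE *fp) {
      char   *line  = NULL;
      size_t  len   = 0;
      ssize_t bytes = 0;
      /* getline returns -1 on EOF or error, so the loop cannot spin on stale data */
      while ((bytes = getline(&line, &len, fp)) >= 0) {
        /* ...tokenize `line` into option/value here... */
      }
      free(line);
    }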
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index 4705110ca6ca8eec302bec9bbb9254c9ec8919a3..af70f72b86e43ef2067627748b97fdfd9b593f17 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -207,6 +207,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, "No permission for dis
TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message length")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories")
+TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING, "Dnode is exiting")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in progress")
diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c
index 13df8e4ee68c2751cff63c49c3acfc289e4068f4..4a0208a3d0bf22f21b5f6a05513f435664e746af 100644
--- a/src/util/src/thashutil.c
+++ b/src/util/src/thashutil.c
@@ -126,20 +126,38 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) {
_hash_fn_t fn = NULL;
switch(type) {
case TSDB_DATA_TYPE_TIMESTAMP:
- case TSDB_DATA_TYPE_UBIGINT:
- case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break;
- case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break;
- case TSDB_DATA_TYPE_NCHAR: fn = MurmurHash3_32;break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ case TSDB_DATA_TYPE_BIGINT:
+ fn = taosIntHash_64;
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ fn = MurmurHash3_32;
+ break;
+ case TSDB_DATA_TYPE_NCHAR:
+ fn = MurmurHash3_32;
+ break;
case TSDB_DATA_TYPE_UINT:
- case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break;
- case TSDB_DATA_TYPE_USMALLINT:
- case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break;
- case TSDB_DATA_TYPE_BOOL: fn = taosIntHash_8; break;
+ case TSDB_DATA_TYPE_INT:
+ fn = taosIntHash_32;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ fn = taosIntHash_16;
+ break;
+ case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
- case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break;
- case TSDB_DATA_TYPE_FLOAT: fn = taosFloatHash; break;
- case TSDB_DATA_TYPE_DOUBLE: fn = taosDoubleHash; break;
- default: fn = taosIntHash_32;break;
+ case TSDB_DATA_TYPE_TINYINT:
+ fn = taosIntHash_8;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ fn = taosFloatHash;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ fn = taosDoubleHash;
+ break;
+ default:
+ fn = taosIntHash_32;
+ break;
}
return fn;
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index 7f127fc396a13f0a7796dcb4ce1dd63ce96cb951..45ff14ffa4adcd018cbf7a7d69b8644582855ab3 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -83,8 +83,10 @@ int64_t dbgWSize = 0;
#ifdef _TD_POWER_
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
+#elif (_TD_TQ_ == true)
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq";
#else
-char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/taos";
+char tsLogDir[PATH_MAX] = "/var/log/taos";
#endif
static SLogObj tsLogObj = { .fileNum = 1 };
diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c
index f014dd0fab5494bd85f197e2c79fac53359e8edf..16142470c95678b8663f3bd437357dcdb22635a5 100644
--- a/src/util/src/tsched.c
+++ b/src/util/src/tsched.c
@@ -108,39 +108,47 @@ void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) {
void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) {
SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label);
-
+
if (tmrCtrl != NULL && pSched != NULL) {
pSched->pTmrCtrl = tmrCtrl;
taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer);
}
-
+
return pSched;
}
-void *taosProcessSchedQueue(void *param) {
+void *taosProcessSchedQueue(void *scheduler) {
SSchedMsg msg;
- SSchedQueue *pSched = (SSchedQueue *)param;
+ SSchedQueue *pSched = (SSchedQueue *)scheduler;
+ int ret = 0;
while (1) {
- if (tsem_wait(&pSched->fullSem) != 0) {
- uError("wait %s fullSem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_wait(&pSched->fullSem)) != 0) {
+ uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
}
if (pSched->stop) {
break;
}
- if (pthread_mutex_lock(&pSched->queueMutex) != 0)
- uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) {
+ uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
msg = pSched->queue[pSched->fullSlot];
memset(pSched->queue + pSched->fullSlot, 0, sizeof(SSchedMsg));
pSched->fullSlot = (pSched->fullSlot + 1) % pSched->queueSize;
- if (pthread_mutex_unlock(&pSched->queueMutex) != 0)
- uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) {
+ uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
- if (tsem_post(&pSched->emptySem) != 0)
- uError("post %s emptySem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_post(&pSched->emptySem)) != 0) {
+ uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
if (msg.fp)
(*(msg.fp))(&msg);
@@ -151,30 +159,37 @@ void *taosProcessSchedQueue(void *param) {
return NULL;
}
-int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) {
- SSchedQueue *pSched = (SSchedQueue *)qhandle;
+void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
+ SSchedQueue *pSched = (SSchedQueue *)queueScheduler;
+ int ret = 0;
+
if (pSched == NULL) {
uError("sched is not ready, msg:%p is dropped", pMsg);
- return 0;
+ return;
}
- if (tsem_wait(&pSched->emptySem) != 0) {
- uError("wait %s emptySem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_wait(&pSched->emptySem)) != 0) {
+ uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
}
- if (pthread_mutex_lock(&pSched->queueMutex) != 0)
- uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) {
+ uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
pSched->queue[pSched->emptySlot] = *pMsg;
pSched->emptySlot = (pSched->emptySlot + 1) % pSched->queueSize;
- if (pthread_mutex_unlock(&pSched->queueMutex) != 0)
- uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
-
- if (tsem_post(&pSched->fullSem) != 0)
- uError("post %s fullSem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) {
+ uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
- return 0;
+ if ((ret = tsem_post(&pSched->fullSem)) != 0) {
+ uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
}
void taosCleanUpScheduler(void *param) {
@@ -219,4 +234,4 @@ void taosDumpSchedulerStatus(void *qhandle, void *tmrId) {
}
taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer);
-}
+}
\ No newline at end of file
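Aside: the scheduler now treats a failed semaphore or mutex call as unrecoverable, logging with uFatal and exiting, rather than carrying on with a queue in an unknown state. A reduced sketch of that fail-fast shape with raw POSIX primitives (fprintf stands in for the logger; the ring-buffer bookkeeping is elided):

    #include <errno.h>
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void takeOneSlot(sem_t *fullSem, pthread_mutex_t *queueMutex) {
      int ret;
      if (sem_wait(fullSem) != 0) {                      /* -1 on failure, errno set */
        fprintf(stderr, "wait fullSem failed(%s)\n", strerror(errno));
        exit(EXIT_FAILURE);                              /* slot count unknown: stop */
      }
      if ((ret = pthread_mutex_lock(queueMutex)) != 0) { /* returns an errno value */
        fprintf(stderr, "lock queueMutex failed(%s)\n", strerror(ret));
        exit(ret);
      }
      /* ...copy one message out of the ring buffer here... */
      if ((ret = pthread_mutex_unlock(queueMutex)) != 0) {
        fprintf(stderr, "unlock queueMutex failed(%s)\n", strerror(ret));
        exit(ret);
      }
    }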
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index bda52936f90d07b1fde598de9ca683c8a1b8b82a..1a73991ade1ea4617fc4d3dab3904652ff46d691 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -427,13 +427,23 @@ char *taosIpStr(uint32_t ipInt) {
}
FORCE_INLINE float taos_align_get_float(const char* pBuf) {
- float fv = 0;
- *(int32_t*)(&fv) = *(int32_t*)pBuf;
+#if __STDC_VERSION__ >= 201112L
+ static_assert(sizeof(float) == sizeof(uint32_t), "sizeof(float) must equal to sizeof(uint32_t)");
+#else
+ assert(sizeof(float) == sizeof(uint32_t));
+#endif
+ float fv = 0;
+ memcpy(&fv, pBuf, sizeof(fv)); // on ARM, returning *((const float*)(pBuf)) directly may fault on unaligned data
return fv;
}
FORCE_INLINE double taos_align_get_double(const char* pBuf) {
- double dv = 0;
- *(int64_t*)(&dv) = *(int64_t*)pBuf;
+#if __STDC_VERSION__ >= 201112L
+ static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) must equal to sizeof(uint64_t)");
+#else
+ assert(sizeof(double) == sizeof(uint64_t));
+#endif
+ double dv = 0;
+ memcpy(&dv, pBuf, sizeof(dv)); // on ARM, returning *((const double*)(pBuf)) directly may fault on unaligned data
return dv;
}
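Aside: memcpy is the portable way to pull a float out of a byte buffer that may not be 4-byte aligned; dereferencing a cast pointer can trap on ARM and also violates strict aliasing. The pattern in isolation:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Read a float from an arbitrarily aligned buffer. The compiler turns the
     * memcpy into a single load on targets where that is safe. */
    static float readFloatUnaligned(const char *pBuf) {
      float fv;
      assert(sizeof(float) == sizeof(uint32_t));  /* same layout assumption as the patch */
      memcpy(&fv, pBuf, sizeof(fv));
      return fv;
    }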
diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt
index ee99348cd9db86923f2ba06da9b3452d2dcc0347..a60c6cff2809dcc2a55f5cce3e593ef06045a975 100644
--- a/src/util/tests/CMakeLists.txt
+++ b/src/util/tests/CMakeLists.txt
@@ -1,15 +1,16 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
-
+
LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c)
ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov)
diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt
index 3fefbea05ba763dfa856dd52c195d36ce70ccd91..6238f43d32ad2ed973f522aca3bb5dfca9101435 100644
--- a/src/vnode/CMakeLists.txt
+++ b/src/vnode/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index d770a38e371c9920c438d810f699ab399be15833..ef05cf4a4063625d8e2810503e541fd32a7f8f62 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -41,6 +41,8 @@ typedef struct {
int32_t queuedWMsg;
int32_t queuedRMsg;
int32_t flowctrlLevel;
+ int8_t preClose; // set before drop/close so new acquisitions are refused
+ int8_t reserved[3];
int64_t sequence; // for topic
int8_t status;
int8_t role;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 074f0f681faaa89ccf7a14da0da43774a76d647b..f826c1aecd336a0eedeb3f02df0a7acc61895bb2 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return terrno;
}
- char rootDir[TSDB_FILENAME_LEN] = {0};
- sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
-
char vnodeDir[TSDB_FILENAME_LEN] = "\0";
snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId);
if (tfsMkdir(vnodeDir) < 0) {
@@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return code;
}
- // STsdbCfg tsdbCfg = {0};
- // tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
- // tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
- // tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
- // tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
- // tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
- // tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1;
- // tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2;
- // tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
- // tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
- // tsdbCfg.precision = pVnodeCfg->cfg.precision;
- // tsdbCfg.compression = pVnodeCfg->cfg.compression;
- // tsdbCfg.update = pVnodeCfg->cfg.update;
- // tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow;
-
- // char tsdbDir[TSDB_FILENAME_LEN] = {0};
- // sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId);
if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
return TSDB_CODE_VND_INIT_FAILED;
@@ -93,7 +73,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
}
int32_t vnodeSync(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vDebug("vgId:%d, failed to sync, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
@@ -116,7 +96,7 @@ int32_t vnodeSync(int32_t vgId) {
int32_t vnodeDrop(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vDebug("vgId:%d, failed to drop, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
@@ -439,15 +419,16 @@ int32_t vnodeOpen(int32_t vgId) {
}
int32_t vnodeClose(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) return 0;
if (pVnode->dropped) {
vnodeRelease(pVnode);
return 0;
}
+ pVnode->preClose = 1;
+
vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode);
- vnodeRemoveFromHash(pVnode);
vnodeRelease(pVnode);
vnodeCleanUp(pVnode);
diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c
index 5a0bafe82301d568da20cd45b0daeeb37995127d..8b17d3a5f2b8871aa83d4daf81ff936773de736a 100644
--- a/src/vnode/src/vnodeMgmt.c
+++ b/src/vnode/src/vnodeMgmt.c
@@ -125,6 +125,18 @@ void vnodeRelease(void *vparam) {
}
}
+void *vnodeAcquireNotClose(int32_t vgId) {
+ SVnodeObj *pVnode = vnodeAcquire(vgId);
+ if (pVnode != NULL && pVnode->preClose == 1) {
+ vnodeRelease(pVnode);
+ terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
+ vDebug("vgId:%d, not exist, pre closing", vgId);
+ return NULL;
+ }
+
+ return pVnode;
+}
+
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) {
int64_t totalStorage = 0;
int64_t compStorage = 0;
@@ -188,7 +200,7 @@ void vnodeBuildStatusMsg(void *param) {
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId);
- SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(pAccess[i].vgId);
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
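Aside: the new preClose flag lets vnodeClose announce the shutdown before the vnode leaves the hash table, and vnodeAcquireNotClose backs out of any reference it takes on such a vnode. A reduced sketch of that acquire/check/release shape (the struct and counter here are simplified stand-ins, not the real SVnodeObj):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct {
      atomic_int refCount;
      int        preClose;   /* set to 1 as soon as a close/drop begins */
    } VnodeLike;

    static void addRef(VnodeLike *v)  { atomic_fetch_add(&v->refCount, 1); }
    static void release(VnodeLike *v) { atomic_fetch_sub(&v->refCount, 1); }

    /* Hand out the vnode only if it has not started shutting down. */
    static VnodeLike *acquireNotClose(VnodeLike *v) {
      if (v == NULL) return NULL;
      addRef(v);
      if (v->preClose == 1) {   /* closing: undo the reference, report "not found" */
        release(v);
        return NULL;
      }
      return v;
    }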
diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c
index 4197428fec6b5d24e7791b2a5f8cb7df229cbca5..2bdfd2ead3a31d8c2cba94d93239de965d2e07dc 100644
--- a/src/vnode/src/vnodeSync.c
+++ b/src/vnode/src/vnodeSync.c
@@ -95,7 +95,7 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) {
}
void vnodeStartSyncFile(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while start filesync", vgId);
return;
@@ -155,7 +155,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara
}
int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while write to cache", vgId);
return -1;
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index 555eda6d13eeb1dbbb83fbd89ee2672966aa8539..a7c418711de5bae2e1e98c90a72a1b2a9aa06d6f 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -396,10 +396,13 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
}
void vnodeWaitWriteCompleted(SVnodeObj *pVnode) {
+ int32_t extraSleep = 0;
while (pVnode->queuedWMsg > 0) {
vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg);
taosMsleep(10);
+ extraSleep = 1;
}
- taosMsleep(900);
+ if (extraSleep)
+ taosMsleep(900);
}
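Aside: vnodeWaitWriteCompleted now pays the 900 ms grace period only when it actually found writes still queued, so an idle vnode closes promptly. The same guard in isolation (usleep stands in for taosMsleep):

    #include <unistd.h>

    /* `queuedWMsg` plays the role of the vnode's pending-write counter. */
    static void waitWriteCompleted(volatile int *queuedWMsg) {
      int hadToWait = 0;
      while (*queuedWMsg > 0) {
        usleep(10 * 1000);       /* poll every 10 ms, like taosMsleep(10) */
        hadToWait = 1;
      }
      if (hadToWait) {
        usleep(900 * 1000);      /* grace period only if writes were in flight */
      }
    }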
diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt
index a89024dab5060b1f18174f769e0d70c00ad00faf..0d9be42bd5d54ddd1fdd372511e4f98fb7d6355b 100644
--- a/src/wal/CMakeLists.txt
+++ b/src/wal/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE)
diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt
index 071ff6fdba084b7bd9a4f6f01c43eac06c774b29..c5bc4198f10d48caf2ea133c475ea99c8e7a2fd2 100644
--- a/src/wal/test/CMakeLists.txt
+++ b/src/wal/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX)
diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c
index 9a52a2ca833e671fe96acf6800f15e44fba292e3..505728fbe4c4a6fbc126aa18ff6db93a28388173 100644
--- a/src/wal/test/waltest.c
+++ b/src/wal/test/waltest.c
@@ -19,6 +19,7 @@
#include "tglobal.h"
#include "tlog.h"
#include "twal.h"
+#include "tfile.h"
int64_t ver = 0;
void *pWal = NULL;
@@ -36,7 +37,7 @@ int writeToQueue(void *pVnode, void *data, int type, void *pMsg) {
}
int main(int argc, char *argv[]) {
- char path[128] = "/home/jhtao/test/wal";
+ char path[128] = "/tmp/wal";
int level = 2;
int total = 5;
int rows = 10000;
@@ -72,9 +73,11 @@ int main(int argc, char *argv[]) {
printf(" [-h help]: print out this help\n\n");
exit(0);
}
- }
+ }
taosInitLog("wal.log", 100000, 10);
+ tfInit();
+ walInit();
SWalCfg walCfg = {0};
walCfg.walLevel = level;
@@ -122,13 +125,13 @@ int main(int argc, char *argv[]) {
printf("index:%" PRId64 " wal:%s\n", index, name);
if (code == 0) break;
-
- index++;
}
getchar();
walClose(pWal);
+ walCleanUp();
+ tfCleanup();
return 0;
}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 4e7e9a87ea6810c362bd676cd9152f61bc08e29d..e21905af3b88cd6628c5b83471ff70013dc996fc 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -3,7 +3,7 @@
# generate release version:
# mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release ..
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
SET(CMAKE_C_STANDARD 11)
diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile
index 0c1f651059714e6e32c3ec7e0a74ed22e05a6f3e..c75427b5f4e568553dbcd9e2686f529a2745c029 100644
--- a/tests/Jenkinsfile
+++ b/tests/Jenkinsfile
@@ -110,16 +110,8 @@ pipeline {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
- mvn clean package assembly:single -DskipTests >/dev/null
- java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
- '''
- }
- catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- sh '''
- cd ${WKC}/src/connector/jdbc
- mvn clean package -Dmaven.test.skip=true >/dev/null
- cd ${WKC}/tests/examples/JDBC/JDBCDemo/
- java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
+ mvn clean package >/dev/null
+ java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
diff --git a/tests/comparisonTest/cassandra/cassandratest/pom.xml b/tests/comparisonTest/cassandra/cassandratest/pom.xml
index 8eeb5c3aa092ba360256a0e02ccdd9cead113b95..00630d93d197379e04268ef940a8e4db282d8186 100644
--- a/tests/comparisonTest/cassandra/cassandratest/pom.xml
+++ b/tests/comparisonTest/cassandra/cassandratest/pom.xml
@@ -75,7 +75,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
<scope>test</scope>
</dependency>
diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
index e0ada8b763eca8208260ebde73ce8fb192e917db..b55a136c7393c2faa857edec801a27721a1eff20 100644
--- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
+++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
@@ -87,14 +87,14 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>29.0-jre</version>
+ <version>30.0-jre</version>
diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt
index 36ed3efe191c9d949d6234bd61ffbbe28c3a33d2..0f389c4c0cefd10fe829d86342bc391cffe37901 100644
--- a/tests/comparisonTest/tdengine/CMakeLists.txt
+++ b/tests/comparisonTest/tdengine/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX)
diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
index 64a91b951bafe65e2f685fb57091221760fb99f9..eac3dec0a92a4c8aa519cd426b9c8d3895047be6 100644
--- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
+++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
@@ -40,7 +40,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>4.13</version>
+ <version>4.13.1</version>
<scope>test</scope>
</dependency>
diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml
index 84467003f905c454bb285088e56bcd26465a3e5e..34518900ed30f48effd47a8786233080f3e5291f 100644
--- a/tests/examples/JDBC/connectionPools/pom.xml
+++ b/tests/examples/JDBC/connectionPools/pom.xml
@@ -4,17 +4,22 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+
+ <properties>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ </properties>
<groupId>com.taosdata.demo</groupId>
<artifactId>connectionPools</artifactId>
<version>1.0-SNAPSHOT</version>
+
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
</dependency>
-
<dependency>
<groupId>com.alibaba</groupId>
@@ -46,9 +51,15 @@
<dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <version>1.2.17</version>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-core</artifactId>
+ <version>2.14.1</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.cloudhopper.proxool</groupId>
+ <artifactId>proxool</artifactId>
+ <version>0.9.1</version>
</dependency>
@@ -57,28 +68,49 @@
org.apache.maven.plugins
maven-assembly-plugin
- 3.1.0
-
-
-
- com.taosdata.example.ConnectionPoolDemo
-
-
-
- jar-with-dependencies
-
-
+ 3.3.0
- make-assembly
+ ConnectionPoolDemo
+
+ ConnectionPoolDemo
+
+
+ com.taosdata.example.ConnectionPoolDemo
+
+
+
+ jar-with-dependencies
+
+
+ package
+
+ single
+
+
+
+
+ ProxoolDemo
+
+ ProxoolDemo
+
+
+ com.taosdata.example.ProxoolDemo
+
+
+
+ jar-with-dependencies
+
+
package
single
+
-
\ No newline at end of file
+
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
index bd57d138b21034f45569ab3dcfc8e1ad5b39263d..96ad65aa4fc10bf81f6107a4bb2e5a4224891298 100644
--- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
@@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder;
import com.taosdata.example.pool.DbcpBuilder;
import com.taosdata.example.pool.DruidPoolBuilder;
import com.taosdata.example.pool.HikariCpBuilder;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit;
public class ConnectionPoolDemo {
- private static Logger logger = Logger.getLogger(DruidPoolBuilder.class);
+ private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class);
private static final String dbName = "pool_test";
private static String poolType = "hikari";
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java
new file mode 100644
index 0000000000000000000000000000000000000000..632ad8c9bf69d13d137d06c1f23c964904c8e050
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java
@@ -0,0 +1,56 @@
+package com.taosdata.example;
+
+import org.logicalcobwebs.proxool.ProxoolException;
+import org.logicalcobwebs.proxool.configuration.JAXPConfigurator;
+
+import java.sql.*;
+
+public class ProxoolDemo {
+
+
+ public static void main(String[] args) {
+
+ String xml = parseConfigurationXml(args);
+ if (xml == null) {
+ printHelp();
+ System.exit(0);
+ }
+
+ try {
+ JAXPConfigurator.configure(xml, false);
+ Class.forName("org.logicalcobwebs.proxool.ProxoolDriver");
+ Connection connection = DriverManager.getConnection("proxool.ds");
+
+ Statement stmt = connection.createStatement();
+
+ ResultSet rs = stmt.executeQuery("show databases");
+ ResultSetMetaData metaData = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ System.out.print(metaData.getColumnLabel(i) + ": " + rs.getString(i));
+ }
+ System.out.println();
+ }
+
+ stmt.close();
+
+ } catch (ClassNotFoundException | SQLException | ProxoolException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private static String parseConfigurationXml(String[] args) {
+ String host = null;
+ for (int i = 0; i < args.length; i++) {
+ if ("--xml".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+ host = args[++i];
+ }
+ }
+ return host;
+ }
+
+ private static void printHelp() {
+ System.out.println("Usage: java -jar ProxoolDemo.jar --xml [xml]");
+ }
+
+}
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
index da7c9a22b5b3e7f5d877a3a1489d55f439bff883..f8f1555c08f1f5847bf0a34a56341ef6d22dde50 100644
--- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
@@ -1,6 +1,7 @@
package com.taosdata.example.common;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@@ -10,7 +11,7 @@ import java.util.Random;
public class InsertTask implements Runnable {
private final Random random = new Random(System.currentTimeMillis());
- private static final Logger logger = Logger.getLogger(InsertTask.class);
+ private static final Logger logger = LogManager.getLogger(InsertTask.class);
private final DataSource ds;
private final String dbName;
diff --git a/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml
new file mode 100644
index 0000000000000000000000000000000000000000..67baa1c3931aa57591af8fc306ed441328606978
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml
@@ -0,0 +1,27 @@
+
+
+
+ ds
+
+ jdbc:TAOS-RS://127.0.0.1:6041/log
+
+ com.taosdata.jdbc.rs.RestfulDriver
+
+
+
+
+
+
+ 100
+
+ 100
+
+ 1
+
+ 5
+
+ 30000
+
+ select server_status()
+
+
\ No newline at end of file
diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml
index a83d0a00e69bdcffff2db8b17c763959dc67365b..ad6a63e800fb73dd3c768a8aca941f70cec235b3 100644
--- a/tests/examples/JDBC/mybatisplus-demo/pom.xml
+++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml
@@ -68,7 +68,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>4.12</version>
+ <version>4.13.1</version>
<scope>test</scope>
</dependency>
diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml
index 22c2f3b63e82a3a2cdcc3093c3c43b98ab534a4b..91b976c2ae6c76a5ae2d7b76c3b90d05e4dae57f 100644
--- a/tests/examples/JDBC/taosdemo/pom.xml
+++ b/tests/examples/JDBC/taosdemo/pom.xml
@@ -4,7 +4,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata</groupId>
<artifactId>taosdemo</artifactId>
- <version>2.0</version>
+ <version>2.0.1</version>
<name>taosdemo</name>
<packaging>jar</packaging>
<description>Demo project for TDengine</description>
@@ -81,20 +81,20 @@
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
- <version>5.1.47</version>
+ <version>8.0.16</version>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <version>1.2.17</version>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-core</artifactId>
+ <version>2.14.1</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>4.12</version>
+ <version>4.13.1</version>
<scope>test</scope>
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
index c361df82b0aebb0d804b1a0982a0c1cf44ef5953..d4f5ff26886b9f90a4235d47bfd004dae9de93f6 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.io.IOException;
@@ -20,7 +21,7 @@ import java.util.Map;
public class TaosDemoApplication {
- private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);
+ private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
public static void main(String[] args) throws IOException {
// read the configuration parameters
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
index 421a2dea1f4b49786d57b5579ca849976708791e..9340fc3fdd0ce7242d4121a5fb259af48f7ada5f 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
@@ -1,14 +1,15 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.Map;
public class DatabaseMapperImpl implements DatabaseMapper {
- private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
index 90b0990a2bf2e9a9bd2738deec17a284c0868280..db0d43ff05a56b673ed08a522b645d3388f8e091 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
@@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
@@ -11,7 +12,7 @@ import java.util.List;
public class SubTableMapperImpl implements SubTableMapper {
- private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
public SubTableMapperImpl(DataSource dataSource) {
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
index efa9a1f39ea41dd1aba65ab610eae095a3164533..658a403a0ca3883831bca1ad2b6d579ef4713f7d 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
@@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
public class SuperTableMapperImpl implements SuperTableMapper {
- private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class);
private JdbcTemplate jdbcTemplate;
public SuperTableMapperImpl(DataSource dataSource) {
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
index b049fbe197034ffcd8801b9c4f5e5ff8dbbcc0e0..16bc094848f6ff585e826bf3181cc4e8c03ee822 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
@@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import java.util.List;
public class TableMapperImpl implements TableMapper {
- private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(TableMapperImpl.class);
private JdbcTemplate template;
@Override
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
index cea98a1c5d350ed22ed5d26c72fedb212dcb7f26..b0a79dea78f429d85804bae4cb0bbec9e712ec1a 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.util.ArrayList;
@@ -20,7 +21,7 @@ import java.util.stream.IntStream;
public class SubTableService extends AbstractService {
private SubTableMapper mapper;
- private static final Logger logger = Logger.getLogger(SubTableService.class);
+ private static final Logger logger = LogManager.getLogger(SubTableService.class);
public SubTableService(DataSource datasource) {
this.mapper = new SubTableMapperImpl(datasource);
diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c
index 0f24df0f4767fe1cdace072425768473ffcaa88f..a377bbc7b47e1a58d4b3294b88386a9c4fb74e47 100644
--- a/tests/examples/c/apitest.c
+++ b/tests/examples/c/apitest.c
@@ -12,7 +12,7 @@ static void prepare_data(TAOS* taos) {
result = taos_query(taos, "drop database if exists test;");
taos_free_result(result);
usleep(100000);
- result = taos_query(taos, "create database test;");
+ result = taos_query(taos, "create database test precision 'us';");
taos_free_result(result);
usleep(100000);
taos_select_db(taos, "test");
@@ -949,13 +949,45 @@ void verify_stream(TAOS* taos) {
taos_close_stream(strm);
}
+int32_t verify_schema_less(TAOS* taos) {
+ TAOS_RES *result;
+ result = taos_query(taos, "drop database if exists test;");
+ taos_free_result(result);
+ usleep(100000);
+ result = taos_query(taos, "create database test precision 'us';");
+ taos_free_result(result);
+ usleep(100000);
+
+ taos_select_db(taos, "test");
+ result = taos_query(taos, "create stable ste(ts timestamp, f int) tags(t1 bigint)");
+ taos_free_result(result);
+ usleep(100000);
+
+ char* lines[] = {
+ "st,t1=3i,t2=4,t3=\"t3\" c1=3i,c3=L\"passit\",c2=false,c4=4 1626006833639000000",
+ "st,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5 1626006833640000000",
+ "ste,t2=5,t3=L\"ste\" c1=true,c2=4,c3=\"iam\" 1626056811823316532",
+ "st,t1=4i,t2=5,t3=\"t4\" c1=3i,c3=L\"passitagain\",c2=true,c4=5 1626006833642000000",
+ "ste,t2=5,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532",
+ "ste,t2=5,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32b,c6=64s,c7=32w,c8=88.88f 1626056812843316532",
+ "st,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5,c6=7u 1626006933640000000",
+ "stf,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin\",c2=true,c4=5,c5=5,c6=7u 1626006933640000000",
+ "stf,t1=4i,t3=\"t4\",t2=5,t4=5 c1=3i,c3=L\"passitagin_stf\",c2=false,c5=5,c6=7u 1626006933641a"
+ };
+
+// int code = taos_insert_lines(taos, lines , sizeof(lines)/sizeof(char*));
+ int code = taos_insert_lines(taos, &lines[0], 1);
+ code = taos_insert_lines(taos, &lines[1], 1);
+
+ return code;
+}
+
int main(int argc, char *argv[]) {
const char* host = "127.0.0.1";
const char* user = "root";
const char* passwd = "taosdata";
taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
-
TAOS* taos = taos_connect(host, user, passwd, "", 0);
if (taos == NULL) {
printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos));
@@ -967,6 +999,12 @@ int main(int argc, char *argv[]) {
info = taos_get_client_info(taos);
printf("client info: %s\n", info);
+ printf("************ verify shemaless *************\n");
+ int code = verify_schema_less(taos);
+ if (code == 0) {
+ return code;
+ }
+
printf("************ verify query *************\n");
verify_query(taos);
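Aside: verify_schema_less pushes line-protocol records through the schemaless API one line at a time. A minimal sketch of a batched call, assuming an already connected TAOS* handle and the taos_insert_lines(handle, lines, count) signature used in the patch:

    #include <stdio.h>
    #include "taos.h"   /* TDengine client header */

    /* Insert two line-protocol records in one schemaless call. */
    static int insertTwoLines(TAOS *taos) {
      char *lines[] = {
        "st,t1=3i c1=3i,c2=false 1626006833639000000",
        "st,t1=4i c1=5i,c2=true 1626006833640000000",
      };
      int code = taos_insert_lines(taos, lines, sizeof(lines) / sizeof(lines[0]));
      if (code != 0) {
        printf("schemaless insert failed, code:%d\n", code);
      }
      return code;
    }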
diff --git a/tests/pytest/alter/alter_keep.py b/tests/pytest/alter/alter_keep.py
index 72ca635ac3df60eb0caaf206220eea279420be5b..b23f364fc6f16973f8e7b5e6159f95718df9b91b 100644
--- a/tests/pytest/alter/alter_keep.py
+++ b/tests/pytest/alter/alter_keep.py
@@ -173,8 +173,9 @@ class TDTestCase:
tdSql.checkData(0,7,'10,10,10')
tdSql.error('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
- tdSql.checkRows(rowNum)
+ tdSql.checkRows(2)
+ rowNum = 2
tdLog.notice('testing keep will be altered if sudden change from small to big')
for i in range(30):
tdSql.execute('alter database db keep 14,14,14')
@@ -182,14 +183,19 @@ class TDTestCase:
tdSql.execute('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
rowNum += 1
- tdSql.checkRows(rowNum )
+ tdSql.checkRows(rowNum)
tdLog.notice('testing keep will be altered if sudden change from big to small')
tdSql.execute('alter database db keep 16,16,16')
tdSql.execute('alter database db keep 14,14,14')
tdSql.error('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
- tdSql.checkRows(rowNum)
+ tdSql.checkRows(2)
+
+ tdLog.notice('testing data will show up again when keep is changed to a larger value')
+ tdSql.execute('alter database db keep 40,40,40')
+ tdSql.query('select * from tb')
+ tdSql.checkRows(63)
diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp
index b42015a05323b4082d7bfaebe403146fc15901df..376567b7e80cbb4544d48b0e28c5d6404b6db468 100644
--- a/tests/pytest/crash_gen/valgrind_taos.supp
+++ b/tests/pytest/crash_gen/valgrind_taos.supp
@@ -17722,4 +17722,24 @@
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
+}
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:__libc_alloc_buffer_allocate
+ fun:alloc_buffer_allocate
+ fun:__resolv_conf_allocate
+ fun:__resolv_conf_load
+ fun:__resolv_conf_get_current
+ fun:__res_vinit
+ fun:maybe_init
+ fun:context_get
+ fun:__resolv_context_get
+ fun:gaih_inet.constprop.7
+ fun:getaddrinfo
+ fun:taosGetFqdn
+ fun:taosCheckGlobalCfg
+ fun:taos_init_imp
}
\ No newline at end of file
diff --git a/tests/pytest/dbmgmt/nanoSecondCheck.py b/tests/pytest/dbmgmt/nanoSecondCheck.py
index 27050a2213f7e6bddeb5cc6135c7fe4760018f61..a5e9adacee53a9172a2d8990ccc4d83feb983bdd 100644
--- a/tests/pytest/dbmgmt/nanoSecondCheck.py
+++ b/tests/pytest/dbmgmt/nanoSecondCheck.py
@@ -99,6 +99,15 @@ class TDTestCase:
tdSql.query('select avg(speed) from tb interval(100000000b)')
tdSql.checkRows(4)
+ tdSql.error('select avg(speed) from tb interval(1b);')
+ tdSql.error('select avg(speed) from tb interval(999b);')
+
+ tdSql.query('select avg(speed) from tb interval(1000b);')
+ tdSql.checkRows(5)
+
+ tdSql.query('select avg(speed) from tb interval(1u);')
+ tdSql.checkRows(5)
+
tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);')
tdSql.checkRows(4)
diff --git a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
index ee663f89b0a6dd776c80033f177f63ec843eaa1e..43e281f43769f59c2384fed43d00868c10a05342 100644
--- a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
+++ b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
@@ -12,9 +12,6 @@
# -*- coding: utf-8 -*-
from basic import *
-from util.sql import tdSql
-
-
class TDTestCase:
@@ -36,4 +33,6 @@ td = TDTestCase()
td.init()
+## usage: python3 OneMnodeMultipleVnodesTest.py
+
diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py
index 50914b0be9428ab77f479c9a18a099ecbd0a2d51..871d69790d328f3dcea9fdfdac27a6abc3bb14bd 100644
--- a/tests/pytest/dockerCluster/basic.py
+++ b/tests/pytest/dockerCluster/basic.py
@@ -44,7 +44,16 @@ class BuildDockerCluser:
"jnidebugFlag":"135",
"qdebugFlag":"135",
"maxSQLLength":"1048576"
- }
+ }
+ cmd = "mkdir -p %s" % self.dockerDir
+ self.execCmd(cmd)
+
+ cmd = "cp *.yml %s" % self.dockerDir
+ self.execCmd(cmd)
+
+ cmd = "cp Dockerfile %s" % self.dockerDir
+ self.execCmd(cmd)
+
# execute command, and return the output
# ref: https://blog.csdn.net/wowocpp/article/details/80775650
@@ -81,7 +90,7 @@ class BuildDockerCluser:
def removeFile(self, rootDir, index, dir):
cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir)
self.execCmd(cmd)
-
+
def clearEnv(self):
cmd = "cd %s && docker-compose down --remove-orphans" % self.dockerDir
self.execCmd(cmd)
@@ -108,10 +117,14 @@ class BuildDockerCluser:
self.execCmd(cmd)
def updateLocalhosts(self):
- cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts"
+ cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts | sed 's: ::g'"
result = self.execCmdAndGetOutput(cmd)
- if result and not result.isspace():
+ print(result)
+ if result is None or result.isspace():
+ print("==========")
cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
+ display = "echo %s" % cmd
+ self.execCmd(display)
self.execCmd(cmd)
def deploy(self):
@@ -138,13 +151,13 @@ class BuildDockerCluser:
if self.numOfNodes < 2 or self.numOfNodes > 10:
print("the number of nodes must be between 2 and 10")
exit(0)
- self.clearEnv()
- self.createDirs()
self.updateLocalhosts()
self.deploy()
def run(self):
- cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
+ cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
+ display = "echo %s" % cmd
+ self.execCmd(display)
self.execCmd(cmd)
self.getConnection()
self.createDondes()
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index c66ccc547740b8514984debe5aab05c5ed844254..1f45cab13a9187b8cf16a573bb73dfb64ce7541b 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -150,6 +150,7 @@ python3 ./test.py -f import_merge/importCSV.py
#======================p2-start===============
# tools
python3 test.py -f tools/taosdumpTest.py
+python3 test.py -f tools/taosdumpTest2.py
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
@@ -235,8 +236,10 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
-# python3 ./test.py -f query/nestedQuery/queryInterval.py
+python3 ./test.py -f query/nestedQuery/queryInterval.py
python3 ./test.py -f query/queryStateWindow.py
+python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
+python3 ./test.py -f query/nestquery_last_row.py
#stream
@@ -333,6 +336,7 @@ python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
python3 ./test.py -f insert/unsignedSmallint.py
python3 ./test.py -f insert/unsignedTinyint.py
+python3 ./test.py -f insert/insertFromCSV.py
python3 ./test.py -f query/filterAllUnsignedIntTypes.py
python3 ./test.py -f tag_lite/unsignedInt.py
@@ -342,12 +346,14 @@ python3 ./test.py -f tag_lite/unsignedTinyint.py
python3 ./test.py -f functions/function_percentile2.py
python3 ./test.py -f insert/boundary2.py
+python3 ./test.py -f insert/insert_locking.py
python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py
diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py
index 2c85e1bbdd088ecc61eb063f6567d12f1faeebfe..4e876cc270b98e3bb5186e07d30b6cf60c5ee298 100644
--- a/tests/pytest/functions/function_irate.py
+++ b/tests/pytest/functions/function_irate.py
@@ -27,6 +27,7 @@ class TDTestCase:
self.rowNum = 100
self.ts = 1537146000000
self.ts1 = 1537146000000000
+ self.ts2 = 1597146000000
def run(self):
@@ -35,6 +36,8 @@ class TDTestCase:
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
tdSql.execute("create table test1 using test tags('beijing', 10)")
+ tdSql.execute("create table test2 using test tags('tianjing', 20)")
+ tdSql.execute("create table test3 using test tags('shanghai', 20)")
tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
@@ -48,6 +51,10 @@ class TDTestCase:
for i in range(self.rowNum):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.execute("insert into gtest1 values(1537146000000,0);")
tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
@@ -69,7 +76,7 @@ class TDTestCase:
tdSql.execute("insert into gtest8 values(1537146000002,4);")
tdSql.execute("insert into gtest8 values(1537146002202,4);")
- # irate verifacation
+ # irate verification -- child table query
tdSql.query("select irate(col1) from test1;")
tdSql.checkData(0, 0, 1)
tdSql.query("select irate(col1) from test1 interval(10s);")
@@ -99,6 +106,32 @@ class TDTestCase:
tdSql.query("select irate(col2) from test1;")
tdSql.checkData(0, 0, 1)
+ # irate verification -- super table query
+ tdSql.query("select irate(col1) from test group by tbname,loc,tag1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(1, 1, "test2")
+ tdSql.checkData(2, 2, "shanghai")
+
+ # add twa test case: query from the super table
+ tdSql.query("select twa(col1) from test group by tbname,loc,tag1;")
+ tdSql.checkData(0, 0, 50.5)
+ tdSql.checkData(1, 1, "test2")
+ tdSql.checkData(2, 2, "shanghai")
+
+ # error cases: irate and twa are invalid with these columns / group by clauses
+ tdSql.error("select irate(col7) from test group by tbname,loc,tag1;")
+ tdSql.error("select irate(col7) from test group by tbname;")
+ tdSql.error("select irate(col1) from test group by loc,tbname,tag1;")
+ # tdSql.error("select irate(col1) from test group by tbname,col7;")
+ tdSql.error("select irate(col1) from test group by col7,tbname;")
+ tdSql.error("select twa(col7) from test group by tbname,loc,tag1;")
+ tdSql.error("select twa(col7) from test group by tbname;")
+ tdSql.error("select twa(col1) from test group by loc,tbname,tag1;")
+ # tdSql.error("select twa(col1) from test group by tbname,col7;")
+ tdSql.error("select twa(col1) from test group by col7,tbname;")
+
+
+ # regular table query
tdSql.query("select irate(col1) from gtest1;")
tdSql.checkData(0, 0, 1.2/1.1)
tdSql.query("select irate(col1) from gtest2;")
diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py
index 263c8a78aa75103c7507422e92f6e20cc0151cd0..3f2e1a03cad0a74c665341ac04250ec8a239ad6f 100644
--- a/tests/pytest/insert/in_function.py
+++ b/tests/pytest/insert/in_function.py
@@ -18,7 +18,6 @@ from util.log import *
from util.cases import *
from util.sql import *
-
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -27,6 +26,7 @@ class TDTestCase:
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-4568
+ # test case for https://jira.taosdata.com:18080/browse/TD-4824
tdLog.info("=============== step1,check bool and tinyint data type")
@@ -137,8 +137,28 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
+ tdLog.info("=============== step1.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_1
+ where in_bool in (true,false) and in_tinyint in (0,127,-127)
+ and tin_bool in (true,false) and tin_tinyint in (0,127,-127)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'True')
+ tdSql.checkData(0,2,'0')
+ tdSql.checkData(0,3,'False')
+ tdSql.checkData(0,4,'0')
+ tdSql.checkData(1,1,'False')
+ tdSql.checkData(1,2,'127')
+ tdSql.checkData(1,3,'False')
+ tdSql.checkData(1,4,'-127')
+ tdSql.checkData(2,1,'True')
+ tdSql.checkData(2,2,'-127')
+ tdSql.checkData(2,3,'True')
+ tdSql.checkData(2,4,'127')
+
- tdLog.info("=============== step1.3,drop normal table && create table")
+ tdLog.info("=============== step1.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;'
cmd2 = 'create table normal_in_bool_tinyint_1 (ts timestamp,in_bool bool,in_tinyint tinyint) ; '
tdLog.info(cmd1)
@@ -147,7 +167,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step1.4,insert normal table right data and check in function")
+ tdLog.info("=============== step1.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -175,6 +195,17 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
+ cmd4 = '''select * from normal_in_bool_tinyint_1
+ where in_bool in (true,false) and in_tinyint in (0,127,-127)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'True')
+ tdSql.checkData(0,2,'0')
+ tdSql.checkData(1,1,'False')
+ tdSql.checkData(1,2,'127')
+ tdSql.checkData(2,1,'True')
+ tdSql.checkData(2,2,'-127')
tdLog.info("=============== step2,check int、smallint and bigint data type")
@@ -378,10 +409,39 @@ class TDTestCase:
tdSql.query('select * from in_int_smallint_bigint_3 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
- tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,3,'-9223372036854775807')
+
+
+ tdLog.info("=============== step2.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_2
+ where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
+ and in_big in (0,9223372036854775807,-9223372036854775807)
+ and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767)
+ and tin_big in (0,9223372036854775807,-9223372036854775807)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'0')
+ tdSql.checkData(0,2,'32767')
+ tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,4,'0')
+ tdSql.checkData(0,5,'32767')
+ tdSql.checkData(0,6,'-9223372036854775807')
+ tdSql.checkData(1,1,'-2147483647')
+ tdSql.checkData(1,2,'0')
+ tdSql.checkData(1,3,'9223372036854775807')
+ tdSql.checkData(1,4,'-2147483647')
+ tdSql.checkData(1,5,'0')
+ tdSql.checkData(1,6,'9223372036854775807')
+ tdSql.checkData(2,1,'2147483647')
+ tdSql.checkData(2,2,'-32767')
+ tdSql.checkData(2,3,'0')
+ tdSql.checkData(2,4,'2147483647')
+ tdSql.checkData(2,5,'-32767')
+ tdSql.checkData(2,6,'0')
- tdLog.info("=============== step2.3,drop normal table && create table")
+ tdLog.info("=============== step2.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;'
cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; '
tdLog.info(cmd1)
@@ -390,7 +450,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step2.4,insert normal table right data and check in function")
+ tdLog.info("=============== step2.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -437,7 +497,23 @@ class TDTestCase:
tdSql.query('select * from normal_int_smallint_bigint_1 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
- tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,3,'-9223372036854775807')
+
+ cmd4 = '''select * from normal_int_smallint_bigint_1
+ where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
+ and in_big in (0,9223372036854775807,-9223372036854775807)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'0')
+ tdSql.checkData(0,2,'32767')
+ tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(1,1,'-2147483647')
+ tdSql.checkData(1,2,'0')
+ tdSql.checkData(1,3,'9223372036854775807')
+ tdSql.checkData(2,1,'2147483647')
+ tdSql.checkData(2,2,'-32767')
+ tdSql.checkData(2,3,'0')
tdLog.info("=============== step3,check binary and nchar data type")
@@ -560,7 +636,30 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
- tdLog.info("=============== step3.3,drop normal table && create table")
+ tdLog.info("=============== step3.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_3
+ where in_binary in (\'0\',\'TDengine\',\'TAOS\')
+ and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
+ and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\')
+ and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\')
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'TDengine')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(0,3,'taosdataTDengine')
+ tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine')
+ tdSql.checkData(1,1,'TAOS')
+ tdSql.checkData(1,2,'涛思数据TAOSdata')
+ tdSql.checkData(1,3,'TDengine')
+ tdSql.checkData(1,4,'北京涛思数据科技有限公司')
+ tdSql.checkData(2,1,'0')
+ tdSql.checkData(2,2,'0')
+ tdSql.checkData(2,3,'0')
+ tdSql.checkData(2,4,'0')
+
+
+ tdLog.info("=============== step3.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;'
cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; '
tdLog.info(cmd1)
@@ -569,7 +668,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step3.4,insert normal table right data and check in function")
+ tdLog.info("=============== step3.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -598,124 +697,413 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.query('select * from normal_in_binary_nchar_1 where in_nchar in (\'北京涛思数据科技有限公司\') order by ts desc')
tdSql.checkData(0,1,'TDengine')
- tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
- tdLog.info("=============== step4,check float and double data type,not support")
+ cmd4 = '''select * from normal_in_binary_nchar_1
+ where in_binary in (\'0\',\'TDengine\',\'TAOS\')
+ and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'TDengine')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(1,1,'TAOS')
+ tdSql.checkData(1,2,'涛思数据TAOSdata')
+ tdSql.checkData(2,1,'0')
+ tdSql.checkData(2,2,'0')
+
+
+ tdLog.info("=============== step4,check float and double data type")
tdLog.info("=============== step4.1,drop table && create table")
- cmd1 = 'drop table if exists in_float_double_1 ;'
+ cmd1 = 'drop table if exists in_ts_float_double_1 ;'
+ cmd2 = 'drop table if exists in_ts_float_double_2 ;'
+ cmd3 = 'drop table if exists in_ts_float_double_3 ;'
cmd10 = 'drop table if exists in_stable_4 ;'
- cmd11 = 'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;'
- cmd12 = 'create table in_float_double_1 using in_stable_4 tags(\'666\',\'88888\') ; '
+ cmd11 = 'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts timestamp,tin_float float,tin_double double) ;'
+ cmd12 = 'create table in_ts_float_double_1 using in_stable_4 tags(\'0\',\'0\',\'0\') ; '
+ cmd13 = 'create table in_ts_float_double_2 using in_stable_4 tags(\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ; '
+ cmd14 = 'create table in_ts_float_double_3 using in_stable_4 tags(\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
+ tdLog.info(cmd2)
+ tdSql.execute(cmd2)
+ tdLog.info(cmd3)
+ tdSql.execute(cmd3)
tdLog.info(cmd10)
tdSql.execute(cmd10)
tdLog.info(cmd11)
tdSql.execute(cmd11)
tdLog.info(cmd12)
tdSql.execute(cmd12)
+ tdLog.info(cmd13)
+ tdSql.execute(cmd13)
+ tdLog.info(cmd14)
+ tdSql.execute(cmd14)
tdLog.info("=============== step4.2,insert stable right data and check in function")
- cmd1 = 'insert into in_float_double_1 values(now,\'888\',\'66666\') ;'
+ cmd1 = 'insert into in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
- tdSql.execute(cmd1)
+ tdSql.execute(cmd1)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+
+ tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
- cmd2 = 'select * from in_stable_4 where in_float in (\'888\');'
+ cmd2 = 'insert into in_ts_float_double_2 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
- tdSql.error(cmd2)
- try:
- tdSql.execute(cmd2)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd2)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+
+ tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
- cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');'
+ cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
- tdSql.error(cmd3)
- try:
- tdSql.execute(cmd3)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');'
- tdLog.info(cmd4)
- tdSql.error(cmd4)
- try:
- tdSql.execute(cmd4)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');'
- tdLog.info(cmd5)
- tdSql.error(cmd5)
- try:
- tdSql.execute(cmd5)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');'
- tdLog.info(cmd6)
- tdSql.error(cmd6)
- try:
- tdSql.execute(cmd6)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');'
- tdLog.info(cmd7)
- tdSql.error(cmd7)
- try:
- tdSql.execute(cmd7)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd3)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+
+ tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+
+ tdLog.info("=============== step4.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_4
+ where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and in_float in (0.00000,666.00000,-888.00000)
+ and in_double in (0.000000000,66666.000000000,-88888.000000000)
+ and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and tin_float in (0.00000,666.00000,-888.00000)
+ and tin_double in (0.000000000,66666.000000000,-88888.000000000)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.checkData(1,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(1,2,666.00000)
+ tdSql.checkData(1,3,-88888.000000000)
+ tdSql.checkData(1,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(1,5,666.00000)
+ tdSql.checkData(1,6,-88888.000000000)
+ tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,2,0.00000)
+ tdSql.checkData(2,3,0.000000000)
+ tdSql.checkData(2,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,5,0.00000)
+ tdSql.checkData(2,6,0.000000000)
+
+
- tdLog.info("=============== step4.3,drop normal table && create table")
- cmd1 = 'drop table if exists normal_in_float_double_1 ;'
- cmd2 = 'create table normal_in_float_double_1 (ts timestamp,in_float float,in_double double) ; '
+ tdLog.info("=============== step4.4,drop normal table && create table")
+ cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;'
+ cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdLog.info(cmd2)
tdSql.execute(cmd2)
- tdLog.info("=============== step4.4,insert normal table right data and check in function")
- cmd1 = 'insert into normal_in_float_double_1 values(now,\'888\',\'666666\') ;'
+ tdLog.info("=============== step4.5,insert normal table right data and check in function")
+ cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
- tdSql.execute(cmd1)
-
- cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');'
- #tdLog.info(cmd2)
- #tdSql.error(cmd2)
- #try:
- # tdSql.execute(cmd2)
- # tdLog.exit("invalid operation: not supported filter condition")
- #except Exception as e:
- # tdLog.info(repr(e))
- # tdLog.info("invalid operation: not supported filter condition")
- #
- #cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
- #tdLog.info(cmd3)
- #tdSql.error(cmd3)
- #try:
- # tdSql.execute(cmd3)
- # tdLog.exit("invalid operation: not supported filter condition")
- #except Exception as e:
- # tdLog.info(repr(e))
- # tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd1)
+
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+
+ cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
+ tdLog.info(cmd2)
+ tdSql.execute(cmd2)
+
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+
+ cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
+ tdLog.info(cmd3)
+ tdSql.execute(cmd3)
+
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+
+ cmd4 = '''select * from normal_in_ts_float_double_1
+ where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and in_double in (0.000000000,66666.000000000,-88888.000000000)
+ and in_float in (0.00000,666.00000,-888.00000)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(1,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(1,2,666.00000)
+ tdSql.checkData(1,3,-88888.000000000)
+ tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,2,0.00000)
+ tdSql.checkData(2,3,0.000000000)
+
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/insert/insertFromCSVOurofOrder.py b/tests/pytest/insert/insertFromCSV.py
similarity index 58%
rename from tests/pytest/insert/insertFromCSVOurofOrder.py
rename to tests/pytest/insert/insertFromCSV.py
index d4de85b7e93e78ad12962c54fbd1014615dc8e3b..c5d36485699dd2b798b353b614008be7234edfd4 100644
--- a/tests/pytest/insert/insertFromCSVOurofOrder.py
+++ b/tests/pytest/insert/insertFromCSV.py
@@ -28,16 +28,15 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
- self.ts = 1500074556514
+ self.ts = 1500074556514
+ self.csvfile = "/tmp/csvfile.csv"
+ self.rows = 100000
def writeCSV(self):
- with open('test3.csv','w', encoding='utf-8', newline='') as csvFile:
+ with open(self.csvfile, 'w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile, dialect='excel')
- for i in range(1000000):
- newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000)
- d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
- dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
- writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
+ for i in range(self.rows):
+ writer.writerow([self.ts + i, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
def removCSVHeader(self):
data = pd.read_csv("ordered.csv")
@@ -45,23 +44,25 @@ class TDTestCase:
data.to_csv("ordered.csv", header = False, index = False)
def run(self):
+ self.writeCSV()
+
tdSql.prepare()
-
tdSql.execute("create table t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
- tdSql.execute("insert into t1 file 'outoforder.csv'")
+ tdSql.execute("insert into t1 file '%s'" % self.csvfile)
duration = time.time() - startTime
- print("Out of Order - Insert time: %d" % duration)
- tdSql.query("select count(*) from t1")
- rows = tdSql.getData(0, 0)
+ print("Insert time: %d" % duration)
+ tdSql.query("select * from t1")
+ tdSql.checkRows(self.rows)
- tdSql.execute("create table t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
- startTime = time.time()
- tdSql.execute("insert into t2 file 'ordered.csv'")
- duration = time.time() - startTime
- print("Ordered - Insert time: %d" % duration)
- tdSql.query("select count(*) from t2")
- tdSql.checkData(0,0, rows)
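+ # Also verify file-based inserts into auto-created child tables of a super table.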
+ tdSql.execute("create table stb(ts timestamp, c1 int, c2 float, c3 int, c4 int) tags(t1 int, t2 binary(20))")
+ tdSql.execute("insert into t2 using stb(t1) tags(1) file '%s'" % self.csvfile)
+ tdSql.query("select * from stb")
+ tdSql.checkRows(self.rows)
+
+ tdSql.execute("insert into t3 using stb tags(1, 'test') file '%s'" % self.csvfile)
+ tdSql.query("select * from stb")
+ tdSql.checkRows(self.rows * 2)
def stop(self):
tdSql.close()
diff --git a/tests/pytest/insert/insert_locking.py b/tests/pytest/insert/insert_locking.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d780a7132fbc83b99e4f5b54fe17101ff4f35f9
--- /dev/null
+++ b/tests/pytest/insert/insert_locking.py
@@ -0,0 +1,178 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import random
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5021
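+ # The steps below issue deliberately malformed auto-create-table INSERT statements,
+ # verify they are rejected with a parser error, and then confirm that the equivalent
+ # well-formed statements still succeed afterwards.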
+
+ tdLog.info("\n\n----------step1 : drop db and create db----------\n")
+ tdSql.execute('''drop database if exists db ;''')
+ tdSql.execute('''create database db ;''')
+ sql = '''show databases;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("\n\n----------step2 : create stable----------\n")
+ tdSql.execute('''create stable
+ db.stable_1 (ts timestamp, payload binary(256))
+ tags(t1 binary(16),t2 int);''')
+ sql = '''show db.stables;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("\n\n----------step3 : create table and insert----------\n")
+ sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" unexpected token")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
+
+ sql = '''insert into db.table1(ts , payload) using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" bind columns again")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: syntax error near ', ;' (bind columns again)")
+
+ sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1",111) (ts , payload) ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" keyword VALUES or FILE required ")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: invalid SQL: (keyword VALUES or FILE required)")
+
+ tdSql.execute('''insert into db.table1 using db.stable_1 (t1 , t2)
+ tags ("table_1" , 111) values ( now , 1) ''')
+ sql = '''select * from db.stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,1)
+ tdSql.checkData(0,2,'table_1')
+
+ tdLog.info("\n\n----------step4 : create table and insert again----------\n")
+ sql = '''insert into db.table2 using db.stable_1 (t1) tags ("table_2") ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" unexpected token")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
+
+ tdSql.execute('''insert into db.table2 using db.stable_1 (t1)
+ tags ("table_2") values ( now , 2) ''')
+ sql = '''select * from db.stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(2)
+ tdSql.checkData(1,1,2)
+ tdSql.checkData(1,2,'table_2')
+
+ tdLog.info("\n\n----------step5 : create table and insert without db----------\n")
+ tdSql.execute('''use db''')
+ sql = '''insert into table3 using stable_1 (t1) tags ("table_3") ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" unexpected token")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
+
+ tdSql.execute('''insert into table3 using stable_1 (t1 , t2)
+ tags ("table_3" , 333) values ( now , 3) ''')
+ sql = '''select * from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(3)
+ tdSql.checkData(2,1,3)
+ tdSql.checkData(2,2,'table_3')
+
+ tdLog.info("\n\n----------step6 : create tables in one sql ----------\n")
+ sql = '''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
+ table5 using stable_1 (t1) tags ("table_5") ( values (now, ;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" unexpected token")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
+
+ tdSql.execute('''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
+ table5 using stable_1 (t1) tags ("table_5") values (now, 5) ''')
+ sql = '''select * from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(5)
+ tdSql.checkData(3,1,4)
+ tdSql.checkData(3,2,'table_4')
+ tdSql.checkData(4,1,5)
+ tdSql.checkData(4,2,'table_5')
+
+
+ sql = '''insert into table6 using stable_1 (t1) tags ("table_6") ( values (now,
+ table7 using stable_1 (t1) tags ("table_7") values (now, 7);'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" invalid SQL")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("invalid SQL")
+
+ tdSql.execute('''insert into table6 using stable_1 (t1 , t2) tags ("table_6" , 666) values (now, 6)
+ table7 using stable_1 (t1) tags ("table_7") values (now, 7) ''')
+ sql = '''select * from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(7)
+ tdSql.checkData(5,1,6)
+ tdSql.checkData(5,2,'table_6')
+ tdSql.checkData(6,1,7)
+ tdSql.checkData(6,2,'table_7')
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/insert/retentionpolicy.py b/tests/pytest/insert/retentionpolicy.py
index e0446113d6d1fb197490a09ebd1ebe4b5b12e66f..607ee26a59969f1ecafd4160e4f9db58e3af568a 100644
--- a/tests/pytest/insert/retentionpolicy.py
+++ b/tests/pytest/insert/retentionpolicy.py
@@ -71,13 +71,10 @@ class TDTestRetetion:
tdDnodes.start(1)
tdLog.info(cmd)
+ ttime = datetime.datetime.now()
tdSql.execute(cmd)
self.queryRows=tdSql.query('select * from test')
- if self.queryRows==4:
- self.checkRows(4,cmd)
- return 0
- else:
- self.checkRows(5,cmd)
+ self.checkRows(3,cmd)
tdLog.info("=============== step3")
tdDnodes.stop(1)
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=48)))
@@ -92,7 +89,7 @@ class TDTestRetetion:
tdLog.info(cmd)
tdSql.execute(cmd)
self.queryRows=tdSql.query('select * from test')
- self.checkRows(6,cmd)
+ self.checkRows(3,cmd)
tdLog.info("=============== step4")
tdDnodes.stop(1)
tdDnodes.start(1)
@@ -100,7 +97,7 @@ class TDTestRetetion:
tdLog.info(cmd)
tdSql.execute(cmd)
self.queryRows=tdSql.query('select * from test')
- self.checkRows(5,cmd)
+ self.checkRows(4,cmd)
tdLog.info("=============== step5")
tdDnodes.stop(1)
@@ -109,6 +106,23 @@ class TDTestRetetion:
self.queryRows=tdSql.query('select * from test where ts > now-1d')
self.checkRows(2,cmd)
+ tdLog.info("=============== step6")
+ tdDnodes.stop(1)
+ os.system("date -s '%s'"%(ttime + datetime.timedelta(seconds=(72*60*60-7))))
+ tdDnodes.start(1)
+ while datetime.datetime.now() < (ttime + datetime.timedelta(seconds=(72*60*60-1))):
+ time.sleep(0.001)
+ cmd = 'select * from test'
+ self.queryRows=tdSql.query(cmd)
+ self.checkRows(4,cmd)
+ while datetime.datetime.now() <= (ttime + datetime.timedelta(hours=72)):
+ time.sleep(0.001)
+ time.sleep(0.01)
+ cmd = 'select * from test'
+ self.queryRows=tdSql.query(cmd)
+ print(tdSql.queryResult)
+ self.checkRows(3,cmd)
+
def stop(self):
os.system("sudo timedatectl set-ntp true")
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=1)))
diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
new file mode 100644
index 0000000000000000000000000000000000000000..26eda1120b6026655add2bcf6c601bf8dd22c54a
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
@@ -0,0 +1,79 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1593548685000
+ self.tables = 10
+ self.rowsPerTable = 100
+
+
+ def run(self):
+ # tdSql.execute("drop database db ")
+ tdSql.prepare()
+ tdSql.execute("create table st (ts timestamp, num int, value int) tags (loc nchar(30))")
+ for i in range(self.tables):
+ for j in range(self.rowsPerTable):
+ args1=(i, i, self.ts + i * self.rowsPerTable + j * 10000, i, random.randint(1, 100))
+ tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d)" % args1)
+
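+ # The nested queries below should return exactly what the inner query produces,
+ # including when limit/offset or slimit/soffset are applied in the subquery.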
+ tdSql.query("select * from (select * from st)")
+ tdSql.checkRows(self.tables * self.rowsPerTable)
+
+ tdSql.query("select * from (select * from st limit 10)")
+ tdSql.checkRows(10)
+
+ tdSql.query("select * from (select * from st order by ts desc limit 10)")
+ tdSql.checkRows(10)
+
+ # bug: https://jira.taosdata.com:18080/browse/TD-5043
+ tdSql.query("select * from (select * from st order by ts desc limit 10 offset 1000)")
+ tdSql.checkRows(0)
+
+ tdSql.query("select avg(value), sum(value) from st group by tbname")
+ tdSql.checkRows(self.tables)
+
+ tdSql.query("select * from (select avg(value), sum(value) from st group by tbname)")
+ tdSql.checkRows(self.tables)
+
+ tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5")
+ tdSql.checkRows(5)
+
+ tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5)")
+ tdSql.checkRows(5)
+
+ tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5 soffset 7")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
+ tdSql.checkRows(3)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/nestquery_last_row.py b/tests/pytest/query/nestquery_last_row.py
new file mode 100644
index 0000000000000000000000000000000000000000..a04cb173af25a5cb1b02cb7227f13426503d080e
--- /dev/null
+++ b/tests/pytest/query/nestquery_last_row.py
@@ -0,0 +1,263 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import random
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1600000000000
+ self.num = 10
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-4735
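+ # Build a super table with six child tables plus a regular table, then verify that
+ # last_row(*) works on nested queries and on union all of subqueries.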
+
+ tdSql.execute('''create stable stable_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
+ t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create table table_0 using stable_1
+ tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
+ tdSql.execute('''create table table_1 using stable_1
+ tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
+ 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_2 using stable_1
+ tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
+ 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_3 using stable_1
+ tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
+ tdSql.execute('''create table table_4 using stable_1
+ tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
+ tdSql.execute('''create table table_5 using stable_1
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
+ #regular table
+ tdSql.execute('''create table regular_table_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp) ;''')
+
+ for i in range(self.num):
+ tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ sql = '''select * from stable_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select * from regular_table_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+
+ tdLog.info("=======last_row(*)========")
+ sql = '''select last_row(*) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+ sql = '''select last_row(*) from regular_table_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+
+ sql = '''select * from stable_1
+ where loc = 'table_0';'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select last_row(*) from
+ (select * from stable_1
+ where loc = 'table_0');'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last_row(*) from
+ (select * from stable_1);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+ tdSql.checkData(0,2,self.num-1)
+ tdSql.checkData(0,3,self.num-1)
+ tdSql.checkData(0,4,self.num-1)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary5.9')
+ tdSql.checkData(0,7,'nchar5.9')
+ tdSql.checkData(0,8,9.00000)
+ tdSql.checkData(0,9,9.000000000)
+ tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
+ tdSql.checkData(0,11,'table_5')
+ tdSql.checkData(0,12,5)
+ tdSql.checkData(0,13,5)
+ tdSql.checkData(0,14,5)
+ tdSql.checkData(0,15,5)
+ tdSql.checkData(0,16,'True')
+ tdSql.checkData(0,17,'binary5')
+ tdSql.checkData(0,18,'nchar5')
+ tdSql.checkData(0,21,'1970-01-01 08:00:00.000')
+
+ sql = '''select * from regular_table_1 ;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select last_row(*) from
+ (select * from regular_table_1);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+ tdSql.checkData(0,2,self.num-1)
+ tdSql.checkData(0,3,self.num-1)
+ tdSql.checkData(0,4,self.num-1)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary5.9')
+ tdSql.checkData(0,7,'nchar5.9')
+ tdSql.checkData(0,8,9.00000)
+ tdSql.checkData(0,9,9.000000000)
+ tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
+
+ sql = '''select last_row(*) from
+ ((select * from table_0) union all
+ (select * from table_1) union all
+ (select * from table_2));'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+ tdSql.checkData(0,2,self.num-1)
+ tdSql.checkData(0,3,self.num-1)
+ tdSql.checkData(0,4,self.num-1)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary.9')
+ tdSql.checkData(0,7,'nchar.9')
+ tdSql.checkData(0,8,9.00000)
+ tdSql.checkData(0,9,9.000000000)
+ tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
+
+ # bug 5055
+ # sql = '''select last_row(*) from
+ # ((select * from stable_1) union all
+ # (select * from table_1) union all
+ # (select * from regular_table_1));'''
+ # tdSql.query(sql)
+ # tdSql.checkData(0,1,self.num-1)
+
+ sql = '''select last_row(*) from
+ ((select last_row(*) from table_0) union all
+ (select last_row(*) from table_1) union all
+ (select last_row(*) from table_2));'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+ tdSql.checkData(0,2,self.num-1)
+ tdSql.checkData(0,3,self.num-1)
+ tdSql.checkData(0,4,self.num-1)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary.9')
+ tdSql.checkData(0,7,'nchar.9')
+ tdSql.checkData(0,8,9.00000)
+ tdSql.checkData(0,9,9.000000000)
+ tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
+
+ # bug 5055
+ # sql = '''select last_row(*) from
+ # ((select last_row(*) from stable_1) union all
+ # (select last_row(*) from table_1) union all
+ # (select last_row(*) from regular_table_1));'''
+ # tdSql.query(sql)
+ # tdSql.checkData(0,1,self.num-1)
+
+ sql = '''select last_row(*) from
+ ((select * from table_0 limit 5 offset 5) union all
+ (select * from table_1 limit 5 offset 5) union all
+ (select * from regular_table_1 limit 5 offset 5));'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+ tdSql.checkData(0,2,self.num-1)
+ tdSql.checkData(0,3,self.num-1)
+ tdSql.checkData(0,4,self.num-1)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary.9')
+ tdSql.checkData(0,7,'nchar.9')
+ tdSql.checkData(0,8,9.00000)
+ tdSql.checkData(0,9,9.000000000)
+ tdSql.checkData(0,10,'2020-09-13 20:26:40.009')
+
+
+ sql = '''select last_row(*) from
+ (select * from stable_1)
+ having q_int>5;'''
+ tdLog.info(sql)
+ tdSql.error(sql)
+ try:
+ tdSql.execute(sql)
+ tdLog.exit(" having only works with group by")
+ except Exception as e:
+ tdLog.info(repr(e))
+ tdLog.info("invalid operation: having only works with group by")
+
+ #bug 5057
+ # sql = '''select last_row(*) from
+ # (select * from (select * from stable_1))'''
+ # tdLog.info(sql)
+ # tdSql.error(sql)
+ # try:
+ # tdSql.execute(sql)
+ # tdLog.exit(" core dumped")
+ # except Exception as e:
+ # tdLog.info(repr(e))
+ # tdLog.info("core dumped")
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py
index ce8d05ae50aa61646b20e07e9484d00559e92b49..d61e8cf288c97fc869f19cba6bd3d181dc60797c 100644
--- a/tests/pytest/query/queryInterval.py
+++ b/tests/pytest/query/queryInterval.py
@@ -114,8 +114,7 @@ class TDTestCase:
tdSql.query("select first(ts),twa(c) from tb interval(14a)")
tdSql.checkRows(6)
- tdSql.query("select twa(c) from tb group by c")
- tdSql.checkRows(4)
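+ # The test now expects twa() with group by on a data column to be rejected.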
+ tdSql.error("select twa(c) from tb group by c")
def stop(self):
diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py
index 720ae745cb9b3780f1ca7ffaf96d76eda5f307b1..742a3c2cd1907107c7cca54c7fb37862227b077f 100644
--- a/tests/pytest/query/queryPerformance.py
+++ b/tests/pytest/query/queryPerformance.py
@@ -45,28 +45,38 @@ class taosdemoQueryPerformace:
sql = "select count(*) from test.meters"
tableid = 1
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.meters"
tableid = 2
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select count(*) from test.meters where loc='beijing'"
tableid = 3
cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.meters where areaid=10"
tableid = 4
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.t10 interval(10s)"
tableid = 5
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select last_row(*) from meters"
tableid = 6
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select * from meters"
tableid = 7
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'"
tableid = 8
cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
-
+
+ sql = "select last(*) from meters"
+ tableid = 9
+ cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
cursor.close()
def query(self):
diff --git a/tests/pytest/tools/taosdemoAllTest/sub.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
similarity index 58%
rename from tests/pytest/tools/taosdemoAllTest/sub.json
rename to tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
index fe3c892a76bcc30678f60127d28ce79bf8682c18..93462d2c66cea62c21f0cc196652c94439f47bc0 100644
--- a/tests/pytest/tools/taosdemoAllTest/sub.json
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
@@ -9,29 +9,33 @@
"confirm_parameter_prompt": "no",
"specified_table_query":
{
- "concurrent":1,
+ "concurrent":2,
"mode":"sync",
"interval":0,
- "restart":"yes",
+ "resubAfterConsume":1,
+ "endAfterConsume":1,
"keepProgress":"yes",
+ "restart":"no",
"sqls": [
{
- "sql": "select * from stb00_0 ;",
+ "sql": "select * from stb00_0",
"result": "./subscribe_res0.txt"
}]
},
"super_table_query":
{
"stblname": "stb0",
- "threads":1,
+ "threads":2,
"mode":"sync",
- "interval":10000,
- "restart":"yes",
+ "interval":1000,
+ "resubAfterConsume":1,
+ "endAfterConsume":1,
"keepProgress":"yes",
+ "restart":"no",
"sqls": [
{
- "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
- "result": "./subscribe_res1.txt"
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
}]
}
}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json
new file mode 100644
index 0000000000000000000000000000000000000000..4229f304e44fcda58a0e16b1e6445ebd339215d3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac221905655e53e9053336be8fcaaa8b1070639c
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":-1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d937212c94bd002307695c7059d67ad0a4e68d3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":0,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":0,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json
new file mode 100644
index 0000000000000000000000000000000000000000..bf8927a58badfa606103d4b11d09f871ed64260f
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":-1,
+ "endAfterConsume":2,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
index 1401716da9095b44aa47e9ecb2e7131bc0a8b9ea..fe29409f296b310012773b9d78ca8735cfd52a13 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
@@ -78,7 +78,7 @@ class TDTestCase:
tdSql.checkData(0, 0, "%d" % suc_kill)
os.system("rm -rf querySystemInfo*")
os.system("rm -rf insert_res.txt")
- os.system("rm -rf insert_res.txt")
+ os.system("rm -rf query_res.txt")
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
index a92906fa730833108ad758d3fc53c954279abe38..62b6e7472aa779888a45603b06cf54a528923dec 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
+++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
@@ -13,7 +13,7 @@
"sqls":[
{
"sql": "select * from stb0",
- "result": ""
+ "result": "./query_res.txt"
}
]
}
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..d4138798e350bb1d0aff25819c01f87604b763bc
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv
@@ -0,0 +1,10 @@
+0,0,'TAOSdata-0'
+1,1,'TAOSdata-1'
+2,22,'TAOSdata-2'
+3,333,'TAOSdata-3'
+4,4444,'TAOSdata-4'
+5,55555,'TAOSdata-5'
+6,666666,'TAOSdata-6'
+7,7777777,'TAOSdata-7'
+8,88888888,'TAOSdata-8'
+9,999999999,'TAOSdata-9'
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json
new file mode 100644
index 0000000000000000000000000000000000000000..265f42036bc5a4e13dc0766b66fccf32924d7185
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10000,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1,
+ "data_source": "sample",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
+ "tags_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
+ "columns": [{"type": "INT","count":2}, {"type": "BINARY", "len": 16, "count":1}],
+ "tags": [{"type": "INT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py
new file mode 100644
index 0000000000000000000000000000000000000000..081057f1802bd18d8aab7e7639589e8759ed44ed
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py
@@ -0,0 +1,191 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+        # insert: create one or multiple tables per sql and insert multiple rows per sql
+ # test case for https://jira.taosdata.com:18080/browse/TD-4985
+ os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 10000)
+
+ for i in range(1000):
+ tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')'''
+ % (1600000000000 + i, i, -10000+i, i))
+ tdSql.query("select * from stb0 where col2 like 'test99%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test98%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test97%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test96%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test95%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test94%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test93%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test92%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test91%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+ tdSql.query("select * from stb0 where col2 like 'test90%' ")
+ tdSql.checkRows(1000)
+ tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" )
+ tdSql.checkData(0, 1, 0)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 2)
+ tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100 offset 5" )
+ tdSql.checkData(0, 1, 5)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 7)
+
+
+ os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/convertResFile.py b/tests/pytest/tools/taosdemoAllTest/convertResFile.py
index 52bb8f40d0f0a5a55450ecb4927067f37f862499..5ed2fec13b1e0722937023d829e5e9b9fa1ad623 100644
--- a/tests/pytest/tools/taosdemoAllTest/convertResFile.py
+++ b/tests/pytest/tools/taosdemoAllTest/convertResFile.py
@@ -2,6 +2,14 @@ from datetime import datetime
import time
import os
+# class FileSeparaSpaceConvertcomma:
+# def __init__(self):
+# self.inputfile = ""
+#         self.outputfile = ""
+# self.affectedRows = 0
+
+# def ConvertFile(self, inputfile,):
+
os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt")
with open('./new_query_res0.txt','r+') as f0:
contents = f0.readlines()
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
index f2dca662fddc5991a9dcdb8371dc0e4086868190..0ae3a7194f8320b3919f850e19861f7796d2a5cc 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
@@ -8,7 +8,7 @@
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
- "confirm_parameter_prompt": "no",
+ "confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
index 55be0198916e3737d185deaa231885fbfa607c66..cd69badad154c6417d0e8d57f4d252354d40ad6b 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
@@ -71,7 +71,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -97,7 +97,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -123,7 +123,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 100,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b56830189623d344168918f239887c3359b2645
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1000,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json
new file mode 100644
index 0000000000000000000000000000000000000000..91234d5e48af891c4dfd0fdfd88121e123bf4edc
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 50000,
+ "num_of_records_per_req": 50000,
+ "max_sql_len": 1025000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 32767,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json
new file mode 100644
index 0000000000000000000000000000000000000000..d05e1c249f25c17c37e40626bf0d3c5a96e5fffe
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 100,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "rest",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
index 5cf8114472e00d5ebc90b5dc762f22f9698f7d76..88218b4989d5e01178142aa9acf2332b34718826 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
@@ -11,8 +11,8 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
- "num_of_records_per_req": 100,
- "max_sql_len": 10240000000,
+ "num_of_records_per_req": 1000000,
+ "max_sql_len": 1024000000,
"databases": [{
"dbinfo": {
"name": "db1",
@@ -45,7 +45,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
+ "interlace_rows": 10000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
index 703f755c31c7b325e34b93878e2e3175648834ef..077ced5d02c792b1c3344ea3e8b129038652b4b8 100644
--- a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
+++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
@@ -60,7 +60,7 @@ class TDTestCase:
tdSql.checkData(0, 0, 1000000)
os.system("rm -rf ./insert_res.txt")
- os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql")
+ os.system("rm -rf tools/taosdemoAllTest/moredemo-insert-offset.py.sql")
def stop(self):
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
new file mode 100644
index 0000000000000000000000000000000000000000..69557a784180acec3c6de059b9285df4d4b31456
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 0,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..9074ae8fd1049d2dbaedfff881feefd84583ca20
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": -1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json
new file mode 100644
index 0000000000000000000000000000000000000000..fd047dec9497c64f8b8f4300617fcc90563b67bc
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 0,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..96a54cfd09cda09f8a9ebed169527c13092c7d57
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": -1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
similarity index 89%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json
rename to tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
index 79471be2044d3ea7c637b4b1e500cfcc8e6413a9..99138e36668971ee2e9aa0656b2ee76f262723e3 100644
--- a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json
+++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
@@ -35,7 +35,7 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
- "childtable_count": 100,
+ "childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
@@ -54,13 +54,13 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
- "childtable_count": 100,
+ "childtable_count": 10,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
@@ -79,7 +79,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json
new file mode 100644
index 0000000000000000000000000000000000000000..747f7b3c7e9ebb5720cae98811e136ece74d47e2
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 5,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0 ,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json
index 67a1cf3eb39c045192b5d35f698e38506777cef2..7ebad5e2b2f5af687656c8eed041579d7de1e2c2 100644
--- a/tests/pytest/tools/taosdemoAllTest/queryQps.json
+++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json
@@ -9,8 +9,8 @@
"databases": "db",
"query_times": 1,
"specified_table_query": {
- "query_interval": 0,
- "concurrent": 1,
+ "query_interval": 10000,
+ "concurrent": 4,
"sqls": [
{
"sql": "select last_row(*) from stb00_0",
@@ -24,8 +24,8 @@
},
"super_table_query": {
"stblname": "stb1",
- "query_interval":0,
- "threads": 1,
+ "query_interval":20000,
+ "threads": 4,
"sqls": [
{
"sql": "select last_row(ts) from xxxx",
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json b/tests/pytest/tools/taosdemoAllTest/queryRestful.json
similarity index 100%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json
rename to tests/pytest/tools/taosdemoAllTest/queryRestful.json
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json b/tests/pytest/tools/taosdemoAllTest/queryTaosc.json
similarity index 100%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json
rename to tests/pytest/tools/taosdemoAllTest/queryTaosc.json
diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimes0.json b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json
new file mode 100644
index 0000000000000000000000000000000000000000..63a13587728fa797a65794994c04378edb87a0c5
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": 0,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..039f7e10603cd2d06608cb24a2cb72356bd50728
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": -1,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subAsync.json b/tests/pytest/tools/taosdemoAllTest/subAsync.json
new file mode 100644
index 0000000000000000000000000000000000000000..67a3bf5aab85bc540b4b891039ba59960ff3f4b1
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subAsync.json
@@ -0,0 +1,45 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"async",
+ "interval":0,
+ "restart":"yes",
+ "keepProgress":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": "./subscribe_res1.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"async",
+ "interval":0,
+ "restart":"yes",
+ "keepProgress":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
index 7d14d0ad4b888fc099becb176e84af54bb769f50..1f9d794990dcbc0daaee2076f2ae6dfd1249b132 100644
--- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
+++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
@@ -35,26 +35,26 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
- "childtable_count": 1,
+ "childtable_count": 2,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 1,
+ "insert_rows": 10,
"childtable_limit": 0,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
- "disorder_range": 0,
+ "disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-02-25 10:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "BINARY", "len":50, "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..d5d0578f07526c18d541391597a3236c99f27544
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 200,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-02-25 10:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 20,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-02-25 10:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSync.json b/tests/pytest/tools/taosdemoAllTest/subSync.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa0b2cd7a4b454fd3332d72a521d244b5e567869
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSync.json
@@ -0,0 +1,45 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": "./subscribe_res1.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":10000,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json
new file mode 100644
index 0000000000000000000000000000000000000000..625e4792cfa113166e0ba5b0ef068bb2109bf027
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json
@@ -0,0 +1,49 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": ""
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": ""
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":10000,
+ "restart":"no",
+ "keepProgress":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": ""
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": ""
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b2828822e4a35989ff9c0d69a469bb34a0e7a84
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json
@@ -0,0 +1,439 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res1.txt"
+ },
+ {
+ "sql": "select * from stb00_1",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_2",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_3",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_4",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_5",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_6",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_7",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_8",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_9",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_10 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_11 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_12 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_13 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_14 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_15 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_16 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_17 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_18 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_19 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_20 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_21 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_22 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_23 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_24 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_25 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_26 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_27 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_28 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_29 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_30 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_31 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_32 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_33 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_34 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_35 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_36 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_37 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_38 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_39 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_40 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_41 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_42 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_43 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_44 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_45 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_46 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_47 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_48 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_49 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_50 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_51 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_52 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_53 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_54 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_55 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_56 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_57 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_58 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_59 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_60",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_61",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_62",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_63",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_64",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_65",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_66",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_67",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_68",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_69",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_70 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_71 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_72 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_73 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_74 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_75 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_76 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_77 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_78 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_79 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_80 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_81 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_82 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_83 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_84 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_85 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_86 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_87 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_88 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_89 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_90 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_91 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_92 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_93 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_94 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_95 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_96 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_97 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_98 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json
new file mode 100644
index 0000000000000000000000000000000000000000..c45a9ea48a147ae96256de60ab6d1f9c579f1431
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json
@@ -0,0 +1,439 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"async",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res1.txt"
+ },
+ {
+ "sql": "select * from stb00_1",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_2",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_3",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_4",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_5",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_6",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_7",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_8",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_9",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_10 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_11 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_12 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_13 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_14 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_15 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_16 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_17 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_18 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_19 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_20 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_21 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_22 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_23 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_24 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_25 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_26 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_27 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_28 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_29 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_30 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_31 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_32 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_33 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_34 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_35 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_36 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_37 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_38 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_39 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_40 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_41 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_42 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_43 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_44 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_45 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_46 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_47 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_48 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_49 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_50 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_51 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_52 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_53 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_54 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_55 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_56 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_57 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_58 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_59 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_60",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_61",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_62",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_63",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_64",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_65",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_66",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_67",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_68",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_69",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_70 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_71 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_72 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_73 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_74 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_75 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_76 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_77 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_78 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_79 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_80 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_81 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_82 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_83 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_84 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_85 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_86 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_87 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_88 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_89 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_90 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_91 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_92 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_93 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_94 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_95 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_96 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_97 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_98 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..3214d35bf04aa3b66b336734469539f42ea50c4c
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json
@@ -0,0 +1,426 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "super_table_query":
+ {
+ "stblname": "stb1",
+ "threads":4,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json
new file mode 100644
index 0000000000000000000000000000000000000000..075ec9cf5dc5fc75da2da4cde8a9358799af7cb9
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json
@@ -0,0 +1,426 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "super_table_query":
+ {
+ "stblname": "stb1",
+ "threads":4,
+ "mode":"async",
+ "interval":0,
+ "restart":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/sub_no_result.json b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json
new file mode 100644
index 0000000000000000000000000000000000000000..cdf7c2314ede28e9c3ccaa9d53864737ff3fac96
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json
@@ -0,0 +1,25 @@
+{
+ "filetype": "subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"sync",
+ "interval": 0,
+ "restart":"yes",
+ "keepProgress":"no",
+ "endAfterConsume": 1100000,
+ "sqls": [
+ {
+ "sql": "select * from st;",
+ "result": ""
+ }]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py
new file mode 100644
index 0000000000000000000000000000000000000000..270eea17cb6c913719fb67c4b8f33065b0a0445d
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py
@@ -0,0 +1,82 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import _thread
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1601481600000
+ self.numberOfRecords = 1100000
+
+ def execCmdAndGetOutput(self, cmd):
+ r = os.popen(cmd)
+ text = r.read()
+ r.close()
+ return text
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ tdSql.prepare()
+ tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
+ tdSql.execute("create table t1 using st tags(0)")
+ currts = self.ts
+ finish = 0
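+        # Build each insert statement in batches: keep appending rows until the SQL string
+        # gets within 16 KB of the ~1 MB (1048576 byte) request limit, execute it, then
+        # resume from the last written row until all self.numberOfRecords rows are inserted.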
+ while(finish < self.numberOfRecords):
+ sql = "insert into t1 values"
+ for i in range(finish, self.numberOfRecords):
+ sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
+ finish = i + 1
+ if (1048576 - len(sql)) < 16384:
+ break
+ tdSql.execute(sql)
+
+ binPath = buildPath+ "/build/bin/"
+
+ os.system("%staosdemo -f tools/taosdemoAllTest/sub_no_result.json -g 2>&1 | tee sub_no_result.log" % binPath)
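+        # the check below expects the log to reach at least 1100024 lines, roughly one line
+        # per consumed row (1,100,000 rows) plus taosdemo's own output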
+ test_line = int(self.execCmdAndGetOutput("cat sub_no_result.log | wc -l"))
+ if(test_line < 1100024):
+ tdLog.exit("failed test subscribeNoResult: %d != expected(1100024)" % test_line)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
index 638a9c49b9b8cfe0864e4a158d3bb9ffe0b7985f..01e46eaaa00326c0da2aa2f61bb14a7349f3ca7f 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
@@ -64,6 +64,22 @@ class TDTestCase:
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 200000)
+ # restful connector insert data
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb01_1")
+ tdSql.checkData(0, 0, 20)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 200)
+
# insert: create mutiple tables per sql and insert one rows per sql .
os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
@@ -165,6 +181,10 @@ class TDTestCase:
tdSql.query("select count(*) from db.stb0")
tdSql.checkData(0, 0, 10000)
tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath)
+ tdSql.query("select count(*) from db.stb0")
+ tdSql.checkRows(0)
+ tdSql.execute("drop database if exists db")
os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("show stables like 'stb0%' ")
@@ -201,6 +221,12 @@ class TDTestCase:
tdSql.checkData(0, 0, "2019-10-01 00:00:00")
tdSql.query("select last(ts) from blf.p_0_topics_6 ")
tdSql.checkData(0, 0, "2020-09-29 23:59:00")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 5000000)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 5000000)
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
index 643cad942c6586486640ba125d520b46c93e3465..6021c9136ad235f3e9d07bb4f6654fdac54989e5 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
@@ -19,6 +19,9 @@ from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
+import ast
+# from assertpy import assert_that
+import subprocess
class TDTestCase:
def init(self, conn, logSql):
@@ -40,85 +43,145 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
-
+
+    # read the taosc query result file line by line and assert that the first column of each line equals the expected value
+ def assertfileDataTaosc(self,filename,expectResult):
+ self.filename = filename
+ self.expectResult = expectResult
+ with open("%s" % filename, 'r+') as f1:
+ for line in f1.readlines():
+ queryResult = line.strip().split()[0]
+ self.assertCheck(filename,queryResult,expectResult)
+
+    # read the key content from the restful query result file; the loop breaks at the first line containing "data", so only a single value is returned (handling multiple result files is left for later)
+ def getfileDataRestful(self,filename):
+ self.filename = filename
+ with open("%s" % filename, 'r+') as f1:
+ for line in f1.readlines():
+ contents = line.strip()
+ if contents.find("data") != -1:
+                    contentsDict = ast.literal_eval(contents)  # convert the string into a dict
+ queryResult = contentsDict['data'][0][0]
+ break
+ return queryResult
+
+    # get the number of taosc queries
+ def queryTimesTaosc(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+    # get the number of restful queries
+ def queryTimesRestful(self,filename):
+ self.filename = filename
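+        # count the "200 OK" response lines in the result file as the number of restful queries executed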
+ command = 'cat %s |grep "200 OK" |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+    # assertion helper: on mismatch raise with the file name, actual and expected values; otherwise pass
+ def assertCheck(self,filename,queryResult,expectResult):
+ self.filename = filename
+ self.queryResult = queryResult
+ self.expectResult = expectResult
+ args0 = (filename, queryResult, expectResult)
+ assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
+ binPath = buildPath+ "/build/bin/"
+
+ # delete useless files
+ os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./all_query*")
+
+ # taosc query: query specified table and query super table
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" % binPath)
+ os.system("cat query_res0.txt* > all_query_res0_taosc.txt")
+ os.system("cat query_res1.txt* > all_query_res1_taosc.txt")
+ os.system("cat query_res2.txt* > all_query_res2_taosc.txt")
+
+ # correct Times testcases
+ queryTimes0Taosc = self.queryTimesTaosc("all_query_res0_taosc.txt")
+ self.assertCheck("all_query_res0_taosc.txt",queryTimes0Taosc,6)
+
+ queryTimes1Taosc = self.queryTimesTaosc("all_query_res1_taosc.txt")
+ self.assertCheck("all_query_res1_taosc.txt",queryTimes1Taosc,6)
+
+ queryTimes2Taosc = self.queryTimesTaosc("all_query_res2_taosc.txt")
+ self.assertCheck("all_query_res2_taosc.txt",queryTimes2Taosc,20)
+
+ # correct data testcase
+ self.assertfileDataTaosc("all_query_res0_taosc.txt","1604160000099")
+ self.assertfileDataTaosc("all_query_res1_taosc.txt","100")
+ self.assertfileDataTaosc("all_query_res2_taosc.txt","1604160000199")
- # query: query specified table and query super table
- os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryTaosc.json" % binPath)
- os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- tdSql.execute("use db")
- tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- tdSql.execute("insert into result0 file './test_query_res0.txt'")
- tdSql.query("select ts from result0")
- tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- tdSql.query("select count(*) from result0")
- tdSql.checkData(0, 0, 1)
- with open('./all_query_res1.txt','r+') as f1:
- result1 = int(f1.readline())
- tdSql.query("select count(*) from stb00_1")
- tdSql.checkData(0, 0, "%d" % result1)
-
- with open('./all_query_res2.txt','r+') as f2:
- result2 = int(f2.readline())
- d2 = datetime.fromtimestamp(result2/1000)
- timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- tdSql.query("select last_row(ts) from stb1")
- tdSql.checkData(0, 0, "%s" % timest)
+ # delete useless files
+ os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./all_query*")
+
+
+ # use restful api to query
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" % binPath)
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryRestful.json" % binPath)
+ os.system("cat query_res0.txt* > all_query_res0_rest.txt")
+ os.system("cat query_res1.txt* > all_query_res1_rest.txt")
+ os.system("cat query_res2.txt* > all_query_res2_rest.txt")
- # # delete useless files
- # os.system("rm -rf ./insert_res.txt")
- # os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
- # os.system("rm -rf ./querySystemInfo*")
- # os.system("rm -rf ./query_res*")
- # os.system("rm -rf ./all_query*")
- # os.system("rm -rf ./test_query_res0.txt")
-
-
- # # use restful api to query
- # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryRestful.json" % binPath)
- # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- # # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- # tdSql.execute("use db")
- # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- # os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- # tdSql.execute("insert into result0 file './test_query_res0.txt'")
- # tdSql.query("select ts from result0")
- # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- # tdSql.query("select count(*) from result0")
- # tdSql.checkData(0, 0, 1)
- # with open('./all_query_res1.txt','r+') as f1:
- # result1 = int(f1.readline())
- # tdSql.query("select count(*) from stb00_1")
- # tdSql.checkData(0, 0, "%d" % result1)
-
- # with open('./all_query_res2.txt','r+') as f2:
- # result2 = int(f2.readline())
- # d2 = datetime.fromtimestamp(result2/1000)
- # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- # tdSql.query("select last_row(ts) from stb1")
- # tdSql.checkData(0, 0, "%s" % timest)
+ # correct Times testcases
+ queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt")
+ self.assertCheck("all_query_res0_rest.txt",queryTimes0Restful,6)
+
+ queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt")
+ self.assertCheck("all_query_res1_rest.txt",queryTimes1Restful,6)
+ queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt")
+ self.assertCheck("all_query_res2_rest.txt",queryTimes2Restful,4)
+
+ # correct data testcase
+ data0 = self.getfileDataRestful("all_query_res0_rest.txt")
+ self.assertCheck('all_query_res0_rest.txt',data0,"2020-11-01 00:00:00.009")
+
+ data1 = self.getfileDataRestful("all_query_res1_rest.txt")
+ self.assertCheck('all_query_res1_rest.txt',data1,10)
+ data2 = self.getfileDataRestful("all_query_res2_rest.txt")
+ self.assertCheck('all_query_res2_rest.txt',data2,"2020-11-01 00:00:00.004")
+
# query times less than or equal to 100
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath)
- # query result print QPS
+ #query result print QPS
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath)
+
+        # use illegal or out-of-range parameters in the query json files
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
+ exceptcode = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" % binPath)
+ assert exceptcode != 0
+
+ exceptcode0 = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" % binPath)
+ assert exceptcode0 != 0
+
+ exceptcode1 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" % binPath)
+ assert exceptcode1 != 0
+ exceptcode2 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" % binPath)
+ assert exceptcode2 != 0
+
+ exceptcode3 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" % binPath)
+ assert exceptcode3 != 0
+
+ exceptcode4 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" % binPath)
+ assert exceptcode4 != 0
# delete useless files
os.system("rm -rf ./insert_res.txt")
@@ -127,6 +190,8 @@ class TDTestCase:
os.system("rm -rf ./query_res*")
os.system("rm -rf ./all_query*")
os.system("rm -rf ./test_query_res0.txt")
+
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
index 1275b6a8b5d9345147ad36351d4269f0968fff5d..3e967581a4491da4108b981ccd83949751406b82 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
@@ -19,6 +19,8 @@ from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
+import subprocess
+
class TDTestCase:
def init(self, conn, logSql):
@@ -40,7 +42,22 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
-
+
+ # get the number of subscriptions
+ def subTimes(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+ # assert results
+ def assertCheck(self,filename,subResult,expectResult):
+ self.filename = filename
+ self.subResult = subResult
+ self.expectResult = expectResult
+ args0 = (filename, subResult, expectResult)
+ assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
@@ -48,48 +65,136 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
+
+ # clear env
+ os.system("ps -ef |grep 'taosdemoAllTest/subSync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ os.system("ps -ef |grep 'taosdemoAllTest/subSyncKeepStart.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ sleep(1)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe_res*")
+ sleep(2)
+ # subscribe: sync
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/subSync.json &" % binPath)
+ query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subSync.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+
+        # insert extra data
+ tdSql.execute("use db")
+ tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)")
+ tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)")
+ sleep(5)
+
+ # merge result files
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
- # query: query specified table and query super table
- # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/sub.json" % binPath)
- # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- # tdSql.execute("use db")
- # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- # os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- # tdSql.execute("insert into result0 file './test_query_res0.txt'")
- # tdSql.query("select ts from result0")
- # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- # tdSql.query("select count(*) from result0")
- # tdSql.checkData(0, 0, 1)
- # with open('./all_query_res1.txt','r+') as f1:
- # result1 = int(f1.readline())
- # tdSql.query("select count(*) from stb00_1")
- # tdSql.checkData(0, 0, "%d" % result1)
-
- # with open('./all_query_res2.txt','r+') as f2:
- # result2 = int(f2.readline())
- # d2 = datetime.fromtimestamp(result2/1000)
- # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- # tdSql.query("select last_row(ts) from stb1")
- # tdSql.checkData(0, 0, "%s" % timest)
+ # correct subscribeTimes testcase
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22)
+
+ subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24)
+
+ subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21)
+ subTimes3 = self.subTimes("all_subscribe_res3.txt")
+ self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13)
- # # query times less than or equal to 100
- # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySpeciMutisql100.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySuperMutisql100.json" % binPath)
+
+ # correct data testcase
+ os.system("kill -9 %d" % query_pid)
+ sleep(3)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
+
+        # # number of sqls larger than 100
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0
+
+        # # result file is empty
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath)
+ # # assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) != 0
+
+
+
+
+ # resubAfterConsume= -1 endAfter=-1 ;
+ os.system('kill -9 `ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'` ')
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json & " % binPath)
+ sleep(2)
+ query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+        print("get sub1 process pid")
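+        # before the extra rows below are inserted, neither subscribe_res0* nor subscribe_res2*
+        # should contain timestamp 1614218412000; after the insert the subscriptions are
+        # expected to pick the new rows up (checked again a few lines further down)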
+ subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1])
+ assert 0==subres0Number1 , "subres0Number1 error"
+ assert 0==subres2Number1 , "subres2Number1 error"
+ tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)")
+ sleep(4)
+        subres2Number2 = int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l')[1])
+ subres0Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ assert 0!=subres2Number2 , "subres2Number2 error"
+ assert 0!=subres0Number2 , "subres0Number2 error"
+ os.system("kill -9 %d" % query_pid1)
+ os.system("rm -rf ./subscribe_res*")
+
+ # # resubAfterConsume= -1 endAfter=0 ;
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ # os.system('kill -9 `ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'` ')
+ # os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json & " % binPath)
+ # sleep(2)
+ # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+ # print("get sub2 process'pid")
+ # subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1])
+ # assert 0==subres0Number1 , "subres0Number1 error"
+ # assert 0==subres2Number1 , "subres2Number1 error"
+ # tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)")
+ # sleep(4)
+ # subres2Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # subres0Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # assert 0!=subres2Number2 , "subres2Number2 error"
+ # assert 0!=subres0Number2 , "subres0Number2 error"
+ # os.system("kill -9 %d" % query_pid1)
+ # os.system("rm -rf ./subscribe_res*")
+ # # # merge result files
+ # os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ # os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ # os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ # # os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
+ # sleep(3)
+
+ # # correct subscribeTimes testcase
+ # subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ # self.assertCheck("all_subscribe_res0.txt",subTimes0 ,3960)
+
+ # subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ # self.assertCheck("all_subscribe_res1.txt",subTimes1 ,40)
+
+ # subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ # self.assertCheck("all_subscribe_res2.txt",subTimes2 ,1900)
+
+
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath)
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath)
+
+
+
# delete useless files
- # os.system("rm -rf ./insert_res.txt")
- # os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
- # os.system("rm -rf ./querySystemInfo*")
- # os.system("rm -rf ./query_res*")
- # os.system("rm -rf ./all_query*")
- # os.system("rm -rf ./test_query_res0.txt")
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2aa01e8703d9703d647507736130de2dd582bfb
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py
@@ -0,0 +1,124 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import time
+from datetime import datetime
+import subprocess
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+    # get the number of subscriptions
+ def subTimes(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+ def assertCheck(self,filename,queryResult,expectResult):
+ self.filename = filename
+ self.queryResult = queryResult
+ self.expectResult = expectResult
+ args0 = (filename, queryResult, expectResult)
+ assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # clear env
+ os.system("ps -ef |grep 'taosdemoAllTest/subAsync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ sleep(1)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe_res*")
+
+        # subscribe: async
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/subAsync.json &" % binPath)
+ query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subAsync.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+
+        # insert extra data
+ tdSql.execute("use db")
+ tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)")
+ tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)")
+ sleep(5)
+
+ # merge result files
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
+ # correct subscribeTimes testcase
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22)
+
+ subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24)
+
+ subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21)
+
+ subTimes3 = self.subTimes("all_subscribe_res3.txt")
+ self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13)
+
+ # correct data testcase
+
+ os.system("kill -9 %d" % query_pid)
+
+        # # number of sqls larger than 100
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0
+
+ # delete useless files
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py
index a45393e22284d675584c5dddd71fc507bcb2563f..90d2c52c15bfc9b5d8c72f3eaa5719f08da19367 100644
--- a/tests/pytest/tools/taosdemoPerformance.py
+++ b/tests/pytest/tools/taosdemoPerformance.py
@@ -16,11 +16,14 @@ import pandas as pd
import argparse
import os.path
import json
+from util.log import tdLog
+from util.sql import tdSql
+
class taosdemoPerformace:
def __init__(self, commitID, dbName):
self.commitID = commitID
- self.dbName = dbName
+ self.dbName = dbName
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@@ -30,8 +33,8 @@ class taosdemoPerformace:
self.user,
self.password,
self.config)
- self.insertDB = "insertDB";
-
+ self.insertDB = "insertDB"
+
def generateJson(self):
db = {
"name": "%s" % self.insertDB,
@@ -41,7 +44,7 @@ class taosdemoPerformace:
stb = {
"name": "meters",
- "child_table_exists":"no",
+ "child_table_exists": "no",
"childtable_count": 10000,
"childtable_prefix": "stb_",
"auto_create_table": "no",
@@ -57,12 +60,12 @@ class taosdemoPerformace:
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
- "tags_file": "",
+ "tags_file": "",
"columns": [
{"type": "INT", "count": 4}
- ],
+ ],
"tags": [
- {"type": "INT", "count":1},
+ {"type": "INT", "count": 1},
{"type": "BINARY", "len": 16}
]
}
@@ -88,7 +91,7 @@ class taosdemoPerformace:
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 30000,
- "databases": [db]
+ "databases": [db]
}
insert_json_file = f"/tmp/insert.json"
@@ -103,24 +106,56 @@ class taosdemoPerformace:
cmd.close()
return output
- def insertData(self):
- os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
- self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
- self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
- self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdemo" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def insertData(self):
+ tdSql.prepare()
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdemo not found!")
+ else:
+ tdLog.info("taosdemo found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system(
+ "%staosdemo -f %s > taosdemoperf.txt 2>&1" %
+ (binPath, self.generateJson()))
+ self.createTableTime = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
+ self.insertRecordsTime = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
+ self.recordsPerSecond = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
self.commitID = self.getCMDOutput("git rev-parse --short HEAD")
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $4}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $4}'")
self.avgDelay = delay[:-4]
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $6}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $6}'")
self.maxDelay = delay[:-4]
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $8}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $8}'")
self.minDelay = delay[:-3]
os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt")
def createTablesAndStoreData(self):
cursor = self.conn.cursor()
-
+
cursor.execute("create database if not exists %s" % self.dbName)
cursor.execute("use %s" % self.dbName)
cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float)")
@@ -130,13 +165,21 @@ class taosdemoPerformace:
print("records per second: %f" % float(self.recordsPerSecond))
print("avg delay: %f" % float(self.avgDelay))
print("max delay: %f" % float(self.maxDelay))
- print("min delay: %f" % float(self.minDelay))
- cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" %
- (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay)))
+ print("min delay: %f" % float(self.minDelay))
+ cursor.execute(
+ "insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" %
+ (float(
+ self.createTableTime), float(
+ self.insertRecordsTime), float(
+ self.recordsPerSecond), self.commitID, float(
+ self.avgDelay), float(
+ self.maxDelay), float(
+ self.minDelay)))
cursor.execute("drop database if exists %s" % self.insertDB)
cursor.close()
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -155,6 +198,6 @@ if __name__ == '__main__':
args = parser.parse_args()
- perftest = taosdemoPerformace(args.commit_id, args.database_name)
+ perftest = taosdemoPerformace(args.commit_id, args.database_name)
perftest.insertData()
perftest.createTablesAndStoreData()
diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py
index ff5921be604f9fe911f1aa8b84efe230baf20e07..4cae8dfd3cabcee3c52a2d1eefea41496994745d 100644
--- a/tests/pytest/tools/taosdemoTest.py
+++ b/tests/pytest/tools/taosdemoTest.py
@@ -36,7 +36,7 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if ("taosdemo" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
@@ -47,11 +47,11 @@ class TDTestCase:
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
- tdLog.exit("taosd not found!")
+ tdLog.exit("taosdemo not found!")
else:
- tdLog.info("taosd found in %s" % buildPath)
+ tdLog.info("taosdemo found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
- os.system("%staosdemo -y -t %d -n %d" %
+ os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" %
(binPath, self.numberOfTables, self.numberOfRecords))
tdSql.execute("use test")
diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py
index 9aa131624e83e04df12b59b0b0318562098c77cb..56c535916a51046e65b2ddd9813141ddb8848bd1 100644
--- a/tests/pytest/tools/taosdemoTestTblAlt.py
+++ b/tests/pytest/tools/taosdemoTestTblAlt.py
@@ -54,7 +54,7 @@ class TDTestCase:
binPath = buildPath + "/build/bin/"
if(threadID == 0):
- os.system("%staosdemo -y -t %d -n %d" %
+ os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT -m t" %
(binPath, self.numberOfTables, self.numberOfRecords))
if(threadID == 1):
time.sleep(2)
diff --git a/tests/pytest/tools/taosdemoTestWithoutMetric.py b/tests/pytest/tools/taosdemoTestWithoutMetric.py
index 9687600563d8fed68c6f9c67643759a3dcfa9703..01e19355d9dfde5c536ac1e28e1f190f33ab966e 100644
--- a/tests/pytest/tools/taosdemoTestWithoutMetric.py
+++ b/tests/pytest/tools/taosdemoTestWithoutMetric.py
@@ -60,7 +60,7 @@ class TDTestCase:
tdSql.execute("use test")
tdSql.query(
- "select count(*) from test.t%d" % (self.numberOfTables -1))
+ "select count(*) from test.d%d" % (self.numberOfTables -1))
tdSql.checkData(0, 0, self.numberOfRecords)
def stop(self):
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 534a477b340210b8ee6bcd77fe6010c6d3d261e0..b2c9eb3ec148cd5f28311131e2f07e15de330521 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -23,42 +23,110 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
self.ts = 1538548685000
self.numberOfTables = 10000
self.numberOfRecords = 100
-
+
+ def checkCommunity(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ return False
+ else:
+ return True
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdump" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
def run(self):
- tdSql.prepare()
+ if not os.path.exists("./taosdumptest/tmp1"):
+ os.makedirs("./taosdumptest/tmp1")
+ else:
+ print("目录存在")
- tdSql.execute("create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
- tdSql.execute("create table t1 using st tags(1, 'beijing')")
+ if not os.path.exists("./taosdumptest/tmp2"):
+ os.makedirs("./taosdumptest/tmp2")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
+ tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.execute("create table t1 using st tags(1, 'beijing')")
sql = "insert into t1 values"
currts = self.ts
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
-
- tdSql.execute("create table t2 using st tags(2, 'shanghai')")
+ tdSql.execute("create table t2 using st tags(2, 'shanghai')")
sql = "insert into t2 values"
currts = self.ts
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
- os.system("taosdump --databases db -o /tmp")
-
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath)
+ os.system(
+ "%staosdump --databases db1 -o ./taosdumptest/tmp2" %
+ binPath)
+
tdSql.execute("drop database db")
+ tdSql.execute("drop database db1")
tdSql.query("show databases")
tdSql.checkRows(0)
-
- os.system("taosdump -i /tmp")
- tdSql.query("show databases")
- tdSql.checkRows(1)
- tdSql.checkData(0, 0, 'db')
-
+ os.system("%staosdump -i ./taosdumptest/tmp1" % binPath)
+ os.system("%staosdump -i ./taosdumptest/tmp2" % binPath)
+
tdSql.execute("use db")
+ tdSql.query("show databases")
+ tdSql.checkRows(2)
+ dbresult = tdSql.queryResult
+ # "show databases" column indexes: 6 -- days, 7 -- keep0,keep1,keep, 9 -- blocks
+
+ isCommunity = self.checkCommunity()
+
+ print("iscommunity: %d" % isCommunity)
+ for i in range(len(dbresult)):
+ if dbresult[i][0] == 'db':
+ print(dbresult[i])
+ print(type(dbresult[i][6]))
+ print(type(dbresult[i][7]))
+ print(type(dbresult[i][9]))
+ assert dbresult[i][6] == 11
+ if isCommunity:
+ assert dbresult[i][7] == "3649"
+ else:
+ assert dbresult[i][7] == "3649,3649,3649"
+ assert dbresult[i][9] == 8
+ if dbresult[i][0] == 'db1':
+ assert dbresult[i][6] == 12
+ if isCommunity:
+ assert dbresult[i][7] == "3640"
+ else:
+ assert dbresult[i][7] == "3640,3640,3640"
+ assert dbresult[i][9] == 7
+
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'st')
@@ -80,10 +148,42 @@ class TDTestCase:
tdSql.checkData(i, 1, i)
tdSql.checkData(i, 2, "nchar%d" % i)
+ # drop all databases, then do boundary value testing:
+ # length(database name) <= 32; length(table name) <= 192
+ tdSql.execute("drop database db")
+ tdSql.execute("drop database db1")
+ os.system("rm -rf ./taosdumptest/tmp1")
+ os.system("rm -rf ./taosdumptest/tmp2")
+ os.makedirs("./taosdumptest/tmp1")
+ tdSql.execute("create database db12312313231231321312312312_323")
+ tdSql.error("create database db12312313231231321312312312_3231")
+ tdSql.execute("use db12312313231231321312312312_323")
+ tdSql.execute("create stable st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.error("create stable st_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.execute(
+ "create stable st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.error("create stable st1(ts timestamp, c1 int, col2_012345678901234567890123456789012345678901234567890123456789 nchar(10)) tags(t1 int, t2 binary(10))")
+
+ tdSql.execute("select * from db12312313231231321312312312_323.st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9")
+ tdSql.error("create table t0_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9 using st tags(1, 'beijing')")
+ tdSql.query("show stables")
+ tdSql.checkRows(2)
+ os.system(
+ "%staosdump --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" % binPath)
+ tdSql.execute("drop database db12312313231231321312312312_323")
+ os.system("%staosdump -i ./taosdumptest/tmp1" % binPath)
+ tdSql.execute("use db12312313231231321312312312_323")
+ tdSql.query("show stables")
+ tdSql.checkRows(2)
+ os.system("rm -rf ./taosdumptest/tmp1")
+ os.system("rm -rf ./taosdumptest/tmp2")
+ os.system("rm -rf ./dump_result.txt")
+ os.system("rm -rf ./db.csv")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
new file mode 100644
index 0000000000000000000000000000000000000000..bed0564139e20fb6c562a7258af0cbd5b542069b
--- /dev/null
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -0,0 +1,99 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1601481600000
+ self.numberOfTables = 1
+ self.numberOfRecords = 15000
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdump" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
+ tdSql.execute("create table t1 using st tags(0)")
+ currts = self.ts
+ finish = 0
+ while(finish < self.numberOfRecords):
+ sql = "insert into t1 values"
+ for i in range(finish, self.numberOfRecords):
+ sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
+ finish = i + 1
+ if (1048576 - len(sql)) < 16384:
+ break
+ tdSql.execute(sql)
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system("rm /tmp/*.sql")
+ os.system(
+ "%staosdump --databases db -o /tmp -B 32766 -L 1048576" %
+ binPath)
+
+ tdSql.execute("drop database db")
+ tdSql.query("show databases")
+ tdSql.checkRows(0)
+
+ os.system("%staosdump -i /tmp" % binPath)
+
+ tdSql.query("show databases")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'db')
+
+ tdSql.execute("use db")
+ tdSql.query("show stables")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'st')
+
+ tdSql.query("select count(*) from t1")
+ tdSql.checkData(0, 0, self.numberOfRecords)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 0f71ffd0a37587cc6be895b4b8b168e6b8cfcaf8..ae4ba97eb3d6c9d9530d9f218bf05dd25aff3b02 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -127,6 +127,7 @@ class TDDnode:
"anyIp":"0",
"tsEnableTelemetryReporting":"0",
"dDebugFlag":"135",
+ "tsdbDebugFlag":"135",
"mDebugFlag":"135",
"sdbDebugFlag":"135",
"rpcDebugFlag":"135",
diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py
index 5071e915a5b2117465247a3bc762f77bbb59159f..d211f86b81d0b41237c645c7955a3d5a3099cf11 100644
--- a/tests/pytest/util/taosdemoCfg.py
+++ b/tests/pytest/util/taosdemoCfg.py
@@ -25,6 +25,21 @@ from multiprocessing import cpu_count
# TODO: fully test the function. Handle exceptions.
# Handle json format not accepted by taosdemo
+
+### How to use TaosdemoCfg:
+# Before you start:
+# Make sure you understand how taosdemo's JSON file is structured. Because the Python code here does
+# not support nested dictionaries as self attributes, the config is split into separate parts.
+# Please make sure you understand which dictionary represents which part of which type of file.
+# This module will reassemble the parts when creating the JSON file.
+#
+# Basic usage (see the sketch after this comment block):
+# step 1: use self.append_sql_stb() to append the insert/query/subscribe dictionary into the module;
+#         you can append many insert/query/subscribe dictionaries, but mind taosdemo's limits
+# step 2: use the alter functions to change a specific config key
+# step 3: use the generation function to generate the files
+#
+# step 1 and step 2 can be replaced by using the import functions
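+#
+# A minimal usage sketch (illustrative only): append_sql_stb and alter_sub_cfg exist in this
+# module, but the argument values and the name of the generation call below are assumptions,
+# not a definitive API.
+#   cfg = TDTaosdemoCfg()
+#   cfg.append_sql_stb("insert", stb_dict)             # step 1: append a super-table dictionary
+#   cfg.alter_sub_cfg("result_file", "./sub_res.txt")  # step 2: alter one specific config key
+#   cfg.generate_insert_cfg()                          # step 3: hypothetical generation call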
class TDTaosdemoCfg:
def __init__(self):
self.insert_cfg = {
@@ -264,7 +279,7 @@ class TDTaosdemoCfg:
elif key == "super_table_query":
self.query_cfg["super_table_query"] = self.stable_query
else:
- self.table_query[key] = value
+ self.query_cfg[key] = value
def alter_sub_cfg(self, key, value):
if key == "specified_table_query":
@@ -272,7 +287,7 @@ class TDTaosdemoCfg:
elif key == "super_table_query":
self.sub_cfg["super_table_query"] = self.stable_sub
else:
- self.table_query[key] = value
+ self.sub_cfg[key] = value
def alter_sub_stb(self, key, value):
if key == "sqls":
diff --git a/tests/pytest/wal/sdbComp.py b/tests/pytest/wal/sdbComp.py
index c0ac02610f94dbe7c6bfb4ccd7aacd5ada03f205..56b18c49eb002791cbfbf1956e448e36694c1316 100644
--- a/tests/pytest/wal/sdbComp.py
+++ b/tests/pytest/wal/sdbComp.py
@@ -28,6 +28,7 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
+ global selfPath
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
@@ -53,7 +54,7 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
- testPath = buildPath[:buildPath.find("debug")]
+ testPath = selfPath+ "/../../../"
walFilePath = testPath + "/sim/dnode1/data/mnode_bak/wal/"
#new db and insert data
diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py
index 117da8ca2ffd046e3cf23399174184b1f64e856b..e364145e190143f2807612350757b64519019daa 100644
--- a/tests/pytest/wal/sdbCompClusterReplica2.py
+++ b/tests/pytest/wal/sdbCompClusterReplica2.py
@@ -86,6 +86,18 @@ class TwoClients:
tdSql.execute("alter table stb2_0 add column col2 binary(4)")
tdSql.execute("alter table stb2_0 drop column col1")
tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")
+ tdSql.execute("drop dnode 10")
+ sleep(10)
+ os.system("rm -rf /var/lib/taos/*")
+ print("clear dnode chenhaoran02'data files")
+ os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &")
+ print("start taosd")
+ sleep(10)
+ tdSql.execute("reset query cache ;")
+ tdSql.execute("create dnode chenhaoran02 ;")
+
+
+
# stop taosd and compact wal file
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 937265753323a41e51ddf9a3be0061c66b6c586a..c820dd3bf56fb5268092dbdec2d37d7cfa0ca0c5 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -131,6 +131,7 @@ run general/parser/join.sim
run general/parser/join_multivnode.sim
run general/parser/select_with_tags.sim
run general/parser/groupby.sim
+run general/parser/top_groupby.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim
index c20a96fd1bd18ba911cce02859d7265d0294b76b..36f4c0e7dcfca27f77b0e701adff47e06f9d1bb0 100644
--- a/tests/script/general/db/alter_option.sim
+++ b/tests/script/general/db/alter_option.sim
@@ -198,29 +198,25 @@ if $data12_db != 1 then
return -1
endi
-sql alter database db wal 1
-sql show databases
-print wal $data12_db
-if $data12_db != 1 then
- return -1
-endi
+sql_error alter database db wal 1
+
-sql alter database db wal 1
-sql alter database db wal 2
-sql alter database db wal 1
-sql alter database db wal 2
-sql alter database db wal 0
+sql_error alter database db wal 1
+sql_error alter database db wal 2
+sql_error alter database db wal 1
+sql_error alter database db wal 2
+sql_error alter database db wal 0
sql_error alter database db wal 3
sql_error alter database db wal 4
sql_error alter database db wal -1
sql_error alter database db wal 1000
print ============== step fsync
-sql alter database db fsync 0
-sql alter database db fsync 1
-sql alter database db fsync 3600
-sql alter database db fsync 18000
-sql alter database db fsync 180000
+sql_error alter database db fsync 0
+sql_error alter database db fsync 1
+sql_error alter database db fsync 3600
+sql_error alter database db fsync 18000
+sql_error alter database db fsync 180000
sql_error alter database db fsync 180001
sql_error alter database db fsync -1
diff --git a/tests/script/general/db/topic1.sim b/tests/script/general/db/topic1.sim
index 4939e5a0e2274e8ab26465fe04ca99a79f9f9ce0..16399731208915602fea6dcd123fb85edf2e085b 100644
--- a/tests/script/general/db/topic1.sim
+++ b/tests/script/general/db/topic1.sim
@@ -495,18 +495,13 @@ if $data12_db != 1 then
endi
sql_error alter topic db wal 1
-sql alter database db wal 1
-sql show databases
-print wal $data12_db
-if $data12_db != 1 then
- return -1
-endi
+sql_error alter database db wal 1
-sql alter database db wal 1
-sql alter database db wal 2
-sql alter database db wal 1
-sql alter database db wal 2
-sql alter database db wal 0
+sql_error alter database db wal 1
+sql_error alter database db wal 2
+sql_error alter database db wal 1
+sql_error alter database db wal 2
+sql_error alter database db wal 0
sql_error alter database db wal 3
sql_error alter database db wal 4
sql_error alter database db wal -1
@@ -523,11 +518,11 @@ sql_error alter topic db wal -1
sql_error alter topic db wal 1000
print ============== step fsync
-sql alter database db fsync 0
-sql alter database db fsync 1
-sql alter database db fsync 3600
-sql alter database db fsync 18000
-sql alter database db fsync 180000
+sql_error alter database db fsync 0
+sql_error alter database db fsync 1
+sql_error alter database db fsync 3600
+sql_error alter database db fsync 18000
+sql_error alter database db fsync 180000
sql_error alter database db fsync 180001
sql_error alter database db fsync -1
@@ -615,17 +610,6 @@ if $rows != 1 then
return -1
endi
-sql alter database d1 fsync 0
-sql show topics;
-if $rows != 0 then
- return -1
-endi
-
-sql show databases;
-if $rows != 1 then
- return -1
-endi
-
sql drop database d1
sql show topics;
if $rows != 0 then
@@ -649,17 +633,6 @@ if $rows != 1 then
return -1
endi
-sql alter database d1 fsync 0
-sql show topics;
-if $rows != 1 then
- return -1
-endi
-
-sql show databases;
-if $rows != 1 then
- return -1
-endi
-
sql drop database d1
sql show topics;
if $rows != 0 then
diff --git a/tests/script/general/parser/create_db.sim b/tests/script/general/parser/create_db.sim
index 7881060ad178fe3b3f7a9ea0530a1ac517264a3e..a62a45e0233f5db81bc7565c856f08483c9d8aee 100644
--- a/tests/script/general/parser/create_db.sim
+++ b/tests/script/general/parser/create_db.sim
@@ -237,7 +237,42 @@ sql_error create database $db ctime 29
sql_error create database $db ctime 40961
# wal {0, 2}
-#sql_error create database $db wal 0
+sql create database testwal wal 0
+sql show databases
+if $rows != 1 then
+ return -1
+endi
+
+sql show databases
+print wallevel $data12_testwal
+if $data12_testwal != 0 then
+ return -1
+endi
+sql drop database testwal
+
+sql create database testwal wal 1
+sql show databases
+if $rows != 1 then
+ return -1
+endi
+sql show databases
+print wallevel $data12_testwal
+if $data12_testwal != 1 then
+ return -1
+endi
+sql drop database testwal
+
+sql create database testwal wal 2
+sql show databases
+if $rows != 1 then
+ return -1
+endi
+print wallevel $data12_testwal
+if $data12_testwal != 2 then
+ return -1
+endi
+sql drop database testwal
+
sql_error create database $db wal -1
sql_error create database $db wal 3
diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim
index 8cd2c333475a0d0140eb5c0c8ee0fa4186fccc97..762413d0a1e975c778ccd3d31e54e0f2d347cef2 100644
--- a/tests/script/general/parser/fill_us.sim
+++ b/tests/script/general/parser/fill_us.sim
@@ -959,14 +959,14 @@ endi
if $data31 != 9.000000000 then
return -1
endi
-if $data41 != null then
+if $data41 != NULL then
print ===== $data41
return -1
endi
if $data51 != 16.000000000 then
return -1
endi
-if $data61 != null then
+if $data61 != NULL then
print ===== $data61
return -1
endi
diff --git a/tests/script/general/parser/gendata.sh b/tests/script/general/parser/gendata.sh
index f56fdc34680f6fda559136a68f34ad38ed406bbd..b2074147ca0a0a4483d19192b45d875ad24a1541 100755
--- a/tests/script/general/parser/gendata.sh
+++ b/tests/script/general/parser/gendata.sh
@@ -4,3 +4,5 @@ Cur_Dir=$(pwd)
echo $Cur_Dir
echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
+echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
+echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index 507431f536cac84d61a18d9c599e6bf9d344766d..1fe19714bbd516c2e8938ce1290f04f8d2053839 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -692,6 +692,7 @@ if $data31 != 4 then
return -1
endi
+sql_error select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,c;
sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
if $rows != 40 then
return -1
@@ -741,4 +742,54 @@ if $data14 != 2 then
return -1
endi
+sql create table m1 (ts timestamp, k int, f1 int) tags(a int);
+sql create table tm0 using m1 tags(0);
+sql create table tm1 using m1 tags(1);
+
+sql insert into tm0 values('2020-1-1 1:1:1', 1, 10);
+sql insert into tm0 values('2020-1-1 1:1:2', 1, 20);
+sql insert into tm1 values('2020-2-1 1:1:1', 2, 10);
+sql insert into tm1 values('2020-2-1 1:1:2', 2, 20);
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 100
+system sh/exec.sh -n dnode1 -s start
+sleep 100
+
+sql connect
+sleep 100
+sql use group_db0;
+
+print =========================>TD-4894
+sql select count(*),k from m1 group by k;
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != 2 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data10 != 2 then
+ return -1
+endi
+
+if $data11 != 2 then
+ return -1
+endi
+
+sql_error select count(*) from m1 group by tbname,k,f1;
+sql_error select count(*) from m1 group by tbname,k,a;
+sql_error select count(*) from m1 group by k, tbname;
+sql_error select count(*) from m1 group by k,f1;
+sql_error select count(*) from tm0 group by tbname;
+sql_error select count(*) from tm0 group by a;
+sql_error select count(*) from tm0 group by k,f1;
+
+sql_error select count(*),f1 from m1 group by tbname,k;
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim
index a8d2102befeabf70d70e3a361ad5e933f021ce4a..e063333853e04faf1a7f4988b6dd1f11207aee5d 100644
--- a/tests/script/general/parser/having.sim
+++ b/tests/script/general/parser/having.sim
@@ -1835,5 +1835,8 @@ if $data04 != 1 then
endi
sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
+sql_error select count(*) from tb1 group by f1 having last(*) > 0;
+
+print known bug: select count(*) k from tb1 group by f1 having k > 0;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim
index a38db3fe44e8857ba646128a856371468d723b2b..0fe5448869a5720a62550a88981114e737e4965b 100644
--- a/tests/script/general/parser/having_child.sim
+++ b/tests/script/general/parser/having_child.sim
@@ -306,41 +306,11 @@ endi
sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having twa(f1) > 0;
-sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3;
-if $rows != 1 then
- return -1
-endi
-if $data00 != 4.000000000 then
- return -1
-endi
-if $data01 != 2 then
- return -1
-endi
-if $data02 != 8 then
- return -1
-endi
-if $data03 != 4.000000000 then
- return -1
-endi
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3;
sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having sum(f1) > 0;
-sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
-if $rows != 1 then
- return -1
-endi
-if $data00 != 2.000000000 then
- return -1
-endi
-if $data01 != 2 then
- return -1
-endi
-if $data02 != 4 then
- return -1
-endi
-if $data03 != 2.000000000 then
- return -1
-endi
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
if $rows != 4 then
diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim
index e9f0f1ed085cc75238681dc08b9601a8d591f6c4..cf11194ba7c3b805725a665c6f92d6bb465b9e4e 100644
--- a/tests/script/general/parser/import_file.sim
+++ b/tests/script/general/parser/import_file.sim
@@ -15,6 +15,8 @@ $inFileName = '~/data.csv'
$numOfRows = 10000
system general/parser/gendata.sh
+sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12));
+
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting import data
@@ -23,13 +25,48 @@ sql import into tbx file '~/data.sql'
sql select count(*) from tbx
if $rows != 1 then
+ print expect 1, actual: $rows
+ return -1
+endi
+
+if $data00 != 3 then
+ return -1
+endi
+
+sql drop table tbx;
+
+sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
+sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
+
+sql select count(*) from tbx
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 3 then
return -1
endi
-#if $data00 != $numOfRows then
-# print "expect: $numOfRows, act: $data00"
-# return -1
-#endi
+sql drop table tbx;
+sql insert into tbx using stbx(b) tags('abcf') file '~/data.sql';
+
+sql select ts,a,b from tbx;
+if $rows != 3 then
+ return -1
+endi
+
+if $data00 != @20-01-01 01:01:01.000@ then
+ print expect 20-01-01 01:01:01.000 , actual: $data00
+ return -1
+endi
+
+if $data01 != NULL then
+ return -1
+endi
+
+if $data02 != @abcf@ then
+ return -1
+endi
system rm -f ~/data.sql
diff --git a/tests/script/general/parser/last_cache.sim b/tests/script/general/parser/last_cache.sim
index 4b3285871b8e9414877a53aa205ba2e747e9d8e1..9c414263ecc65cc11327bbcfc7a79131984393b9 100644
--- a/tests/script/general/parser/last_cache.sim
+++ b/tests/script/general/parser/last_cache.sim
@@ -1,6 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim
index fea322ec16b0d67af41b2a727cffa409cef8b37a..7cdd04e2ccdb93c7e1f84298101d74e7c3af061f 100644
--- a/tests/script/general/parser/lastrow.sim
+++ b/tests/script/general/parser/lastrow.sim
@@ -82,5 +82,9 @@ endi
if $data01 != NULL then
return -1
endi
+sql select last_row(*) from (select f from lr_nested)
+if $rows != 1 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/line_insert.sim b/tests/script/general/parser/line_insert.sim
new file mode 100644
index 0000000000000000000000000000000000000000..f3067a3bbec8c7d566570704d6b84caaaa1f8e67
--- /dev/null
+++ b/tests/script/general/parser/line_insert.sim
@@ -0,0 +1,54 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 2000
+sql connect
+
+print =============== step1
+$db = testlp
+$mte = ste
+$mt = st
+sql drop database $db -x step1
+step1:
+sql create database $db precision 'us'
+sql use $db
+sql create stable $mte (ts timestamp, f int) TAGS(t1 bigint)
+
+line_insert st,t1=3i,t2=4,t3="t3" c1=3i,c3=L"passit",c2=false,c4=4 1626006833639000000
+line_insert st,t1=4i,t3="t41",t2=5 c1=3i,c3=L"passiT",c2=true,c4=5 1626006833640000000
+line_insert stf,t1=4i,t2=5,t3="t4" c1=3i,c3=L"passitagain",c2=true,c4=5 1626006833642000000
+line_insert ste,t2=5,t3=L"ste" c1=true,c2=4,c3="iam" 1626056811823316532
+
+sql select * from st
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != @21-07-11 20:33:53.639000@ then
+ return -1
+endi
+
+if $data03 != @passit@ then
+ return -1
+endi
+
+sql select * from stf
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from ste
+if $rows != 1 then
+ return -1
+endi
+
+#print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index 8249d9197f55998ae26cb6dd232b6a701bf0a32c..6035992d30f189175573dc3d985f40a0c359646a 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -179,21 +179,156 @@ if $data21 != 49.500000000 then
return -1
endi
-#define TSDB_FUNC_APERCT 7
-#define TSDB_FUNC_LAST_ROW 10
-#define TSDB_FUNC_TWA 14
-#define TSDB_FUNC_LEASTSQR 15
-#define TSDB_FUNC_ARITHM 23
-#define TSDB_FUNC_DIFF 24
-#define TSDB_FUNC_INTERP 28
-#define TSDB_FUNC_RATE 29
-#define TSDB_FUNC_IRATE 30
-#define TSDB_FUNC_DERIVATIVE 32
-
sql_error select stddev(c1) from (select c1 from nest_tb0);
sql_error select percentile(c1, 20) from (select * from nest_tb0);
+sql_error select interp(c1) from (select * from nest_tb0);
+sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
+sql_error select twa(c1) from (select c1 from nest_tb0);
+sql_error select irate(c1) from (select c1 from nest_tb0);
+sql_error select diff(c1), twa(c1) from (select * from nest_tb0);
+sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0);
+
+sql select apercentile(c1, 50) from (select * from nest_tb0) interval(1d)
+if $rows != 7 then
+ return -1
+endi
+
+if $data00 != @20-09-15 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 47.571428571 then
+ return -1
+endi
+
+if $data10 != @20-09-16 00:00:00.000@ then
+ return -1
+endi
+
+if $data11 != 49.666666667 then
+ return -1
+endi
+
+if $data20 != @20-09-17 00:00:00.000@ then
+ return -1
+endi
+
+if $data21 != 49.000000000 then
+ return -1
+endi
+
+if $data30 != @20-09-18 00:00:00.000@ then
+ return -1
+endi
+
+if $data31 != 48.333333333 then
+ return -1
+endi
+
+sql select twa(c1) from (select * from nest_tb0);
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 49.500000000 then
+ return -1
+endi
+
+sql select leastsquares(c1, 1, 1) from (select * from nest_tb0);
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != @{slop:0.000100, intercept:49.000000}@ then
+ return -1
+endi
+
+sql select irate(c1) from (select * from nest_tb0);
+if $data00 != 0.016666667 then
+ return -1
+endi
+
+sql select derivative(c1, 1s, 0) from (select * from nest_tb0);
+if $rows != 9999 then
+ return -1
+endi
+
+if $data00 != @20-09-15 00:01:00.000@ then
+ return -1
+endi
+
+if $data01 != 0.016666667 then
+ return -1
+endi
+
+if $data10 != @20-09-15 00:02:00.000@ then
+ return -1
+endi
+
+if $data11 != 0.016666667 then
+ return -1
+endi
+
+sql select diff(c1) from (select * from nest_tb0);
+if $rows != 9999 then
+ return -1
+endi
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
+if $rows != 7 then
+ return -1
+endi
+
+if $data00 != @20-09-15 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 48.666666667 then
+ print expect 48.666666667, actual: $data01
+ return -1
+endi
+
+if $data02 != 70080.000000000 then
+ return -1
+endi
+
+if $data03 != 99 then
+ return -1
+endi
+
+if $data04 != 0 then
+ return -1
+endi
+
+if $data05 != 1440 then
+ return -1
+endi
+
+if $data06 != 0 then
+ print $data06
+ return -1
+endi
+
+if $data07 != 1 then
+ return -1
+endi
+
+if $data08 != 99.000000000 then
+ print expect 99.000000000, actual: $data08
+ return -1
+endi
+
+if $data10 != @20-09-16 00:00:00.000@ then
+ return -1
+endi
+
+if $data11 != 49.777777778 then
+ return -1
+endi
+
+if $data12 != 71680.000000000 then
+ return -1
+endi
sql select top(x, 20) from (select c1 x from nest_tb0);
@@ -207,6 +342,9 @@ print ===================> group by + having
+print =========================> ascending order/descending order
+
+
print =========================> nest query join
@@ -273,7 +411,6 @@ if $data03 != @20-09-15 00:00:00.000@ then
return -1
endi
-sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
sql select diff(val) from (select c1 val from nest_tb0);
if $rows != 9999 then
return -1
@@ -287,4 +424,70 @@ if $data01 != 1 then
return -1
endi
+sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0
+
+print ===========>td-4805
+sql_error select tbname, i from (select * from nest_tb0) group by i;
+
+sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1;
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != 100 then
+ return -1
+endi
+
+if $data01 != 0 then
+ return -1
+endi
+
+if $data10 != 100 then
+ return -1
+endi
+
+if $data11 != 1 then
+ return -1
+endi
+
+print =====================>TD-5157
+sql select twa(c1) from nest_tb1 interval(19a);
+if $rows != 10000 then
+ return -1
+endi
+
+if $data00 != @20-09-14 23:59:59.992@ then
+ return -1
+endi
+
+if $data01 != 0.000083333 then
+ return -1
+endi
+
+print =================>us database interval query, TD-5039
+sql create database test precision 'us';
+sql use test;
+sql create table t1(ts timestamp, k int);
+sql insert into t1 values('2020-01-01 01:01:01.000', 1) ('2020-01-01 01:02:00.000', 2);
+sql select avg(k) from (select avg(k) k from t1 interval(1s)) interval(1m);
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != @20-01-01 01:01:00.000000@ then
+ return -1
+endi
+
+if $data01 != 1.000000000 then
+ return -1
+endi
+
+if $data10 != @20-01-01 01:02:00.000000@ then
+ return -1
+endi
+
+if $data11 != 2.000000000 then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim
index f5c94d2ae6d643d987176c845a9803fe8336848f..eb6cd75d2104f7ff61b5f5e5bccc12fdd239d3d5 100644
--- a/tests/script/general/parser/select_with_tags.sim
+++ b/tests/script/general/parser/select_with_tags.sim
@@ -190,32 +190,32 @@ if $rows != 12800 then
return -1
endi
-sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0;
-if $rows != 100 then
+sql select top(c1, 80), tbname, t1, t2 from select_tags_mt0;
+if $rows != 80 then
return -1
endi
-if $data00 != @70-01-01 08:03:30.100@ then
+if $data00 != @70-01-01 08:03:40.100@ then
return -1
endi
-if $data10 != @70-01-01 08:03:30.200@ then
+if $data10 != @70-01-01 08:03:40.200@ then
return -1
endi
-if $data01 != 110 then
+if $data01 != 111 then
return -1
endi
-if $data02 != @select_tags_tb11@ then
+if $data02 != @select_tags_tb12@ then
return -1
endi
-if $data03 != 11 then
+if $data03 != 12 then
return -1
endi
-if $data04 != @abc11@ then
+if $data04 != @abc12@ then
return -1
endi
@@ -248,8 +248,8 @@ if $data04 != @abc12@ then
return -1
endi
-sql select bottom(c1, 100), tbname, t1, t2 from select_tags_mt0;
-if $rows != 100 then
+sql select bottom(c1, 72), tbname, t1, t2 from select_tags_mt0;
+if $rows != 72 then
return -1
endi
diff --git a/tests/script/general/parser/single_row_in_tb_query.sim b/tests/script/general/parser/single_row_in_tb_query.sim
index 1f9cb8b558c90323e18602005e275f067efeb345..acf85ea6922048e10ce8bd93c9eadb799649750f 100644
--- a/tests/script/general/parser/single_row_in_tb_query.sim
+++ b/tests/script/general/parser/single_row_in_tb_query.sim
@@ -193,3 +193,7 @@ endi
if $data04 != 1 then
return -1
endi
+
+print ===============>safety check TD-4927
+sql select first(ts, c1) from sr_stb where ts<1 group by t1;
+sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
\ No newline at end of file
diff --git a/tests/script/general/parser/subInfrom.sim b/tests/script/general/parser/subInfrom.sim
deleted file mode 100644
index e47831ee8797e3a9a09ee933c7286740120623e6..0000000000000000000000000000000000000000
--- a/tests/script/general/parser/subInfrom.sim
+++ /dev/null
@@ -1,147 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/exec.sh -n dnode1 -s start
-sleep 100
-sql connect
-sleep 100
-
-print ========== sub_in_from.sim
-$i = 0
-
-$dbPrefix = subdb
-$tbPrefix = sub_tb
-$stbPrefix = sub_stb
-$tbNum = 10
-$rowNum = 1000
-$totalNum = $tbNum * $rowNum
-$loops = 200000
-$log = 10000
-$ts0 = 1537146000000
-$delta = 600000
-$i = 0
-$db = $dbPrefix . $i
-$stb = $stbPrefix . $i
-
-sql drop database $db -x step1
-step1:
-sql create database $db cache 16 maxrows 4096 keep 36500
-print ====== create tables
-sql use $db
-sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
-
-$i = 0
-$ts = $ts0
-$halfNum = $tbNum / 2
-while $i < $halfNum
- $tbId = $i + $halfNum
- $tb = $tbPrefix . $i
- $tb1 = $tbPrefix . $tbId
- sql create table $tb using $stb tags( $i )
- sql create table $tb1 using $stb tags( $tbId )
-
- $x = 0
- while $x < $rowNum
- $xs = $x * $delta
- $ts = $ts0 + $xs
- $c = $x / 10
- $c = $c * 10
- $c = $x - $c
- $binary = 'binary . $c
- $binary = $binary . '
- $nchar = 'nchar . $c
- $nchar = $nchar . '
- sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar )
- sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar )
- $x = $x + 1
- endw
-
- $i = $i + 1
-endw
-print ====== tables created
-
-sql_error select count(*) from (select count(*) from abc.sub_stb0)
-sql_error select val + 20 from (select count(*) from sub_stb0 interval(10h))
-sql_error select abc+20 from (select count(*) from sub_stb0 interval(1s))
-
-sql select count(*) from (select count(*) from sub_stb0 interval(10h))
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 18 then
- print expect 18, actual: $data00
- return -1
-endi
-
-sql select ts from (select count(*) from sub_stb0 interval(10h))
-if $rows != 18 then
- return -1
-endi
-
-if $data00 != @18-09-17 04:00:00.000@ then
- return -1
-endi
-
-if $data01 != @18-09-17 14:00:00.000@ then
- return -1
-endi
-
-sql select val + 20, val from (select count(*) as val from sub_stb0 interval(10h))
-if $rows != 18 then
- return -1
-endi
-
-if $data00 != 320.000000 then
- return -1
-endi
-
-if $data01 != 300 then
- return -1
-endi
-
-if $data10 != 620 then
- return -1
-endi
-
-if $data11 != 600 then
- return -1
-endi
-
-if $data20 != 620 then
- return -1
-endi
-
-if $data21 != 600 then
- return -1
-endi
-
-sql select max(val), min(val), max(val) - min(val) from (select count(*) val from sub_stb0 interval(10h))
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 600 then
- return -1
-endi
-
-if $data01 != 100 then
- return -1
-endi
-
-if $data02 != 500.000000 then
- return -1
-endi
-
-sql select first(ts,val),last(ts,val) from (select count(*) val from sub_stb0 interval(10h))
-sql select top(val, 5) from (select count(*) val from sub_stb0 interval(10h))
-sql select diff(val) from (select count(*) val from sub_stb0 interval(10h))
-sql select apercentile(val, 50) from (select count(*) val from sub_stb0 interval(10h))
-
-# not support yet
-sql select percentile(val, 50) from (select count(*) val from sub_stb0 interval(10h))
-sql select stddev(val) from (select count(*) val from sub_stb0 interval(10h))
-
-print ====================>complex query
-
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 5f711389662f16e660d6fdc88a2518b6d4221efc..d7f06769a8bc2afe3c2f95acc16953ddd3b188da 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -12,7 +12,7 @@ run general/parser/create_tb.sim
run general/parser/dbtbnameValidate.sim
run general/parser/fill.sim
run general/parser/fill_stb.sim
-#run general/parser/fill_us.sim #
+run general/parser/fill_us.sim
run general/parser/first_last.sim
run general/parser/import_commit1.sim
run general/parser/import_commit2.sim
diff --git a/tests/script/general/parser/top_groupby.sim b/tests/script/general/parser/top_groupby.sim
new file mode 100644
index 0000000000000000000000000000000000000000..5709f4d1d7210761292d59aefa8984dad2fd2f23
--- /dev/null
+++ b/tests/script/general/parser/top_groupby.sim
@@ -0,0 +1,52 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))
+
+sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");
+
+sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1")
+sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2")
+sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3")
+sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4")
+
+sql select top(f1, 2) from tb1 group by f1;
+
+if $rows != 5 then
+ return -1
+endi
+
+sql select bottom(f1, 2) from tb1 group by f1;
+
+if $rows != 5 then
+ return -1
+endi
+
+sql select top(f1, 100) from tb1 group by f1;
+
+if $rows != 8 then
+ return -1
+endi
+
+sql select bottom(f1, 100) from tb1 group by f1;
+
+if $rows != 8 then
+ return -1
+endi
+
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index 2c6be78e13dbf0d236486fa006501123b40b9b1b..6b789de4903a6abd4ef7ad66a28a6008b588d4fb 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -340,18 +340,24 @@ if $rows != 0 then
return -1
endi
-print ==========================> td-4783
+print ==========================> td-4783,td-4792
sql create table where_ts(ts timestamp, f int)
sql insert into where_ts values('2021-06-19 16:22:00', 1);
sql insert into where_ts values('2021-06-19 16:23:00', 2);
sql insert into where_ts values('2021-06-19 16:24:00', 3);
sql insert into where_ts values('2021-06-19 16:25:00', 1);
sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
-if $row != 2 then
+if $rows != 2 then
return -1
endi
print $data00, $data01
if $data01 != 2 then
return -1
endi
+sql insert into where_ts values(now, 5);
+sleep 10
+sql select * from (select * from where_ts) where ts $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
+ print actual: $system_content
return -1
endi
@@ -149,6 +150,8 @@ endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all
print curl 127.0.0.1:7111/admin/all -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
+ print actual: $system_content
+ print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}
return -1
endi
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 2eb8ee1614b286f3827705865cf073a7eded0c88..2702d192d3f47022f05888f90ca89c4ef533fe44 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h
index 58314d2e5055f0716793342157f6c82d6d729b29..2e19dde3d9c52c20705d131f471a2e0e389589e4 100644
--- a/tests/tsim/inc/sim.h
+++ b/tests/tsim/inc/sim.h
@@ -87,6 +87,8 @@ enum {
SIM_CMD_RESTFUL,
SIM_CMD_TEST,
SIM_CMD_RETURN,
+ SIM_CMD_LINE_INSERT,
+ SIM_CMD_LINE_INSERT_ERROR,
SIM_CMD_END
};
@@ -172,6 +174,8 @@ bool simExecuteSqlCmd(SScript *script, char *option);
bool simExecuteSqlErrorCmd(SScript *script, char *rest);
bool simExecuteSqlSlowCmd(SScript *script, char *option);
bool simExecuteRestfulCmd(SScript *script, char *rest);
+bool simExecuteLineInsertCmd(SScript *script, char *option);
+bool simExecuteLineInsertErrorCmd(SScript *script, char *option);
void simVisuallizeOption(SScript *script, char *src, char *dst);
#endif
\ No newline at end of file
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index 7d74946e939bb5f34f81ef6a6aee56a31c4a6cfe..a05f46ce0de54628f289c937e959ccc3337e00a9 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -1067,3 +1067,49 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
return false;
}
+
+bool simExecuteLineInsertCmd(SScript *script, char *rest) {
+ char buf[TSDB_MAX_BINARY_LEN];
+
+ simVisuallizeOption(script, rest, buf);
+ rest = buf;
+
+ SCmdLine *line = &script->lines[script->linePos];
+
+ simInfo("script:%s, %s", script->fileName, rest);
+ simLogSql(buf, true);
+ char * lines[] = {rest};
+ int32_t ret = taos_insert_lines(script->taos, lines, 1);
+ if (ret == TSDB_CODE_SUCCESS) {
+ simDebug("script:%s, taos:%p, %s executed. success.", script->fileName, script->taos, rest);
+ script->linePos++;
+ return true;
+ } else {
+ sprintf(script->error, "lineNum: %d. line: %s failed, ret:%d:%s", line->lineNum, rest,
+ ret & 0XFFFF, tstrerror(ret));
+ return false;
+ }
+}
+
+bool simExecuteLineInsertErrorCmd(SScript *script, char *rest) {
+ char buf[TSDB_MAX_BINARY_LEN];
+
+ simVisuallizeOption(script, rest, buf);
+ rest = buf;
+
+ SCmdLine *line = &script->lines[script->linePos];
+
+ simInfo("script:%s, %s", script->fileName, rest);
+ simLogSql(buf, true);
+ char * lines[] = {rest};
+ int32_t ret = taos_insert_lines(script->taos, lines, 1);
+ if (ret == TSDB_CODE_SUCCESS) {
+ sprintf(script->error, "script:%s, taos:%p, %s executed. expect failed, but success.", script->fileName, script->taos, rest);
+ script->linePos++;
+ return false;
+ } else {
+ simDebug("lineNum: %d. line: %s failed, ret:%d:%s. Expect failed, so success", line->lineNum, rest,
+ ret & 0XFFFF, tstrerror(ret));
+ return true;
+ }
+}
diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c
index b909f5bd8fc10bea09afd65dc504ae35d6de3505..1acdcd2ac6eb0ecb66e2977dee7577393ed242ef 100644
--- a/tests/tsim/src/simParse.c
+++ b/tests/tsim/src/simParse.c
@@ -838,6 +838,38 @@ bool simParseRunBackCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
return true;
}
+bool simParseLineInsertCmd(char* rest, SCommand* pCmd, int32_t lineNum) {
+ int32_t expLen;
+
+ rest++;
+ cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT;
+ cmdLine[numOfLines].lineNum = lineNum;
+ cmdLine[numOfLines].optionOffset = optionOffset;
+ expLen = (int32_t)strlen(rest);
+ memcpy(optionBuffer + optionOffset, rest, expLen);
+ optionOffset += expLen + 1;
+ *(optionBuffer + optionOffset - 1) = 0;
+
+ numOfLines++;
+ return true;
+}
+
+bool simParseLineInsertErrorCmd(char* rest, SCommand* pCmd, int32_t lineNum) {
+ int32_t expLen;
+
+ rest++;
+ cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT_ERROR; /* error variant needs its own command id so simExecuteLineInsertErrorCmd is dispatched */
+ cmdLine[numOfLines].lineNum = lineNum;
+ cmdLine[numOfLines].optionOffset = optionOffset;
+ expLen = (int32_t)strlen(rest);
+ memcpy(optionBuffer + optionOffset, rest, expLen);
+ optionOffset += expLen + 1;
+ *(optionBuffer + optionOffset - 1) = 0;
+
+ numOfLines++;
+ return true;
+}
+
void simInitsimCmdList() {
int32_t cmdno;
memset(simCmdList, 0, SIM_CMD_END * sizeof(SCommand));
@@ -1049,4 +1081,20 @@ void simInitsimCmdList() {
simCmdList[cmdno].parseCmd = simParseReturnCmd;
simCmdList[cmdno].executeCmd = simExecuteReturnCmd;
simAddCmdIntoHash(&(simCmdList[cmdno]));
+
+ cmdno = SIM_CMD_LINE_INSERT;
+ simCmdList[cmdno].cmdno = cmdno;
+ strcpy(simCmdList[cmdno].name, "line_insert");
+ simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name);
+ simCmdList[cmdno].parseCmd = simParseLineInsertCmd;
+ simCmdList[cmdno].executeCmd = simExecuteLineInsertCmd;
+ simAddCmdIntoHash(&(simCmdList[cmdno]));
+
+ cmdno = SIM_CMD_LINE_INSERT_ERROR;
+ simCmdList[cmdno].cmdno = cmdno;
+ strcpy(simCmdList[cmdno].name, "line_insert_error");
+ simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name);
+ simCmdList[cmdno].parseCmd = simParseLineInsertErrorCmd;
+ simCmdList[cmdno].executeCmd = simExecuteLineInsertErrorCmd;
+ simAddCmdIntoHash(&(simCmdList[cmdno]));
}