+
+#include "llimits.h"
+#include "lua.h"
+
+
+/* tags for values visible from Lua */
+#define LAST_TAG LUA_TTHREAD
+
+#define NUM_TAGS (LAST_TAG+1)
+
+
+/*
+** Extra tags for non-values
+*/
+#define LUA_TPROTO (LAST_TAG+1)
+#define LUA_TUPVAL (LAST_TAG+2)
+#define LUA_TDEADKEY (LAST_TAG+3)
+
+
+/*
+** Union of all collectable objects
+*/
+typedef union GCObject GCObject;
+
+
+/*
+** Common Header for all collectable objects (in macro form, to be
+** included in other objects)
+*/
+#define CommonHeader GCObject *next; lu_byte tt; lu_byte marked
+
+
+/*
+** Common header in struct form
+*/
+typedef struct GCheader {
+ CommonHeader;
+} GCheader;
+
+
+
+
+/*
+** Union of all Lua values
+*/
+typedef union {
+ GCObject *gc;
+ void *p;
+ lua_Number n;
+ int b;
+} Value;
+
+
+/*
+** Tagged Values
+*/
+
+#define TValuefields Value value; int tt
+
+typedef struct lua_TValue {
+ TValuefields;
+} TValue;
+
+
+/* Macros to test type */
+#define ttisnil(o) (ttype(o) == LUA_TNIL)
+#define ttisnumber(o) (ttype(o) == LUA_TNUMBER)
+#define ttisstring(o) (ttype(o) == LUA_TSTRING)
+#define ttistable(o) (ttype(o) == LUA_TTABLE)
+#define ttisfunction(o) (ttype(o) == LUA_TFUNCTION)
+#define ttisboolean(o) (ttype(o) == LUA_TBOOLEAN)
+#define ttisuserdata(o) (ttype(o) == LUA_TUSERDATA)
+#define ttisthread(o) (ttype(o) == LUA_TTHREAD)
+#define ttislightuserdata(o) (ttype(o) == LUA_TLIGHTUSERDATA)
+
+/* Macros to access values */
+#define ttype(o) ((o)->tt)
+#define gcvalue(o) check_exp(iscollectable(o), (o)->value.gc)
+#define pvalue(o) check_exp(ttislightuserdata(o), (o)->value.p)
+#define nvalue(o) check_exp(ttisnumber(o), (o)->value.n)
+#define rawtsvalue(o) check_exp(ttisstring(o), &(o)->value.gc->ts)
+#define tsvalue(o) (&rawtsvalue(o)->tsv)
+#define rawuvalue(o) check_exp(ttisuserdata(o), &(o)->value.gc->u)
+#define uvalue(o) (&rawuvalue(o)->uv)
+#define clvalue(o) check_exp(ttisfunction(o), &(o)->value.gc->cl)
+#define hvalue(o) check_exp(ttistable(o), &(o)->value.gc->h)
+#define bvalue(o) check_exp(ttisboolean(o), (o)->value.b)
+#define thvalue(o) check_exp(ttisthread(o), &(o)->value.gc->th)
+
+#define l_isfalse(o) (ttisnil(o) || (ttisboolean(o) && bvalue(o) == 0))
+
+/*
+** for internal debug only
+*/
+#define checkconsistency(obj) \
+ lua_assert(!iscollectable(obj) || (ttype(obj) == (obj)->value.gc->gch.tt))
+
+#define checkliveness(g,obj) \
+ lua_assert(!iscollectable(obj) || \
+ ((ttype(obj) == (obj)->value.gc->gch.tt) && !isdead(g, (obj)->value.gc)))
+
+
+/* Macros to set values */
+#define setnilvalue(obj) ((obj)->tt=LUA_TNIL)
+
+#define setnvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.n=(x); i_o->tt=LUA_TNUMBER; }
+
+#define setpvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.p=(x); i_o->tt=LUA_TLIGHTUSERDATA; }
+
+#define setbvalue(obj,x) \
+ { TValue *i_o=(obj); i_o->value.b=(x); i_o->tt=LUA_TBOOLEAN; }
+
+#define setsvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TSTRING; \
+ checkliveness(G(L),i_o); }
+
+#define setuvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TUSERDATA; \
+ checkliveness(G(L),i_o); }
+
+#define setthvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTHREAD; \
+ checkliveness(G(L),i_o); }
+
+#define setclvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TFUNCTION; \
+ checkliveness(G(L),i_o); }
+
+#define sethvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTABLE; \
+ checkliveness(G(L),i_o); }
+
+#define setptvalue(L,obj,x) \
+ { TValue *i_o=(obj); \
+ i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TPROTO; \
+ checkliveness(G(L),i_o); }
+
+
+
+
+#define setobj(L,obj1,obj2) \
+ { const TValue *o2=(obj2); TValue *o1=(obj1); \
+ o1->value = o2->value; o1->tt=o2->tt; \
+ checkliveness(G(L),o1); }
+
+
+/*
+** different types of sets, according to destination
+*/
+
+/* from stack to (same) stack */
+#define setobjs2s setobj
+/* to stack (not from same stack) */
+#define setobj2s setobj
+#define setsvalue2s setsvalue
+#define sethvalue2s sethvalue
+#define setptvalue2s setptvalue
+/* from table to same table */
+#define setobjt2t setobj
+/* to table */
+#define setobj2t setobj
+/* to new object */
+#define setobj2n setobj
+#define setsvalue2n setsvalue
+
+#define setttype(obj, tt) (ttype(obj) = (tt))
+
+
+#define iscollectable(o) (ttype(o) >= LUA_TSTRING)
+
+
+
+typedef TValue *StkId; /* index to stack elements */
+
+
+/*
+** String headers for string table
+*/
+typedef union TString {
+ L_Umaxalign dummy; /* ensures maximum alignment for strings */
+ struct {
+ CommonHeader;
+ lu_byte reserved;
+ unsigned int hash;
+ size_t len;
+ } tsv;
+} TString;
+
+
+#define getstr(ts) cast(const char *, (ts) + 1)
+#define svalue(o) getstr(tsvalue(o))
+
+
+
+typedef union Udata {
+ L_Umaxalign dummy; /* ensures maximum alignment for `local' udata */
+ struct {
+ CommonHeader;
+ struct Table *metatable;
+ struct Table *env;
+ size_t len;
+ } uv;
+} Udata;
+
+
+
+
+/*
+** Function Prototypes
+*/
+typedef struct Proto {
+ CommonHeader;
+ TValue *k; /* constants used by the function */
+ Instruction *code;
+ struct Proto **p; /* functions defined inside the function */
+ int *lineinfo; /* map from opcodes to source lines */
+ struct LocVar *locvars; /* information about local variables */
+ TString **upvalues; /* upvalue names */
+ TString *source;
+ int sizeupvalues;
+ int sizek; /* size of `k' */
+ int sizecode;
+ int sizelineinfo;
+ int sizep; /* size of `p' */
+ int sizelocvars;
+ int linedefined;
+ int lastlinedefined;
+ GCObject *gclist;
+ lu_byte nups; /* number of upvalues */
+ lu_byte numparams;
+ lu_byte is_vararg;
+ lu_byte maxstacksize;
+} Proto;
+
+
+/* masks for new-style vararg */
+#define VARARG_HASARG 1
+#define VARARG_ISVARARG 2
+#define VARARG_NEEDSARG 4
+
+
+typedef struct LocVar {
+ TString *varname;
+ int startpc; /* first point where variable is active */
+ int endpc; /* first point where variable is dead */
+} LocVar;
+
+
+
+/*
+** Upvalues
+*/
+
+typedef struct UpVal {
+ CommonHeader;
+ TValue *v; /* points to stack or to its own value */
+ union {
+ TValue value; /* the value (when closed) */
+ struct { /* double linked list (when open) */
+ struct UpVal *prev;
+ struct UpVal *next;
+ } l;
+ } u;
+} UpVal;
+
+
+/*
+** Closures
+*/
+
+#define ClosureHeader \
+ CommonHeader; lu_byte isC; lu_byte nupvalues; GCObject *gclist; \
+ struct Table *env
+
+typedef struct CClosure {
+ ClosureHeader;
+ lua_CFunction f;
+ TValue upvalue[1];
+} CClosure;
+
+
+typedef struct LClosure {
+ ClosureHeader;
+ struct Proto *p;
+ UpVal *upvals[1];
+} LClosure;
+
+
+typedef union Closure {
+ CClosure c;
+ LClosure l;
+} Closure;
+
+
+#define iscfunction(o) (ttype(o) == LUA_TFUNCTION && clvalue(o)->c.isC)
+#define isLfunction(o) (ttype(o) == LUA_TFUNCTION && !clvalue(o)->c.isC)
+
+
+/*
+** Tables
+*/
+
+typedef union TKey {
+ struct {
+ TValuefields;
+ struct Node *next; /* for chaining */
+ } nk;
+ TValue tvk;
+} TKey;
+
+
+typedef struct Node {
+ TValue i_val;
+ TKey i_key;
+} Node;
+
+
+typedef struct Table {
+ CommonHeader;
+  lu_byte flags;  /* 1<<p means tagmethod(p) is not present */
+  lu_byte lsizenode;  /* log2 of size of `node' array */
+  struct Table *metatable;
+  TValue *array;  /* array part */
+  Node *node;
+  Node *lastfree;  /* any free position is before this position */
+  GCObject *gclist;
+  int sizearray;  /* size of `array' array */
+} Table;
+
+
+
+/*
+** `module' operation for hashing (size is always a power of 2)
+*/
+#define lmod(s,size) \
+        (check_exp((size&(size-1))==0, (cast(int, (s) & ((size)-1)))))
+
+
+#define twoto(x)        (1<<(x))
+#define sizenode(t)     (twoto((t)->lsizenode))
+
+
+#define luaO_nilobject (&luaO_nilobject_)
+
+LUAI_DATA const TValue luaO_nilobject_;
+
+#define ceillog2(x) (luaO_log2((x)-1) + 1)
+
+LUAI_FUNC int luaO_log2 (unsigned int x);
+LUAI_FUNC int luaO_int2fb (unsigned int x);
+LUAI_FUNC int luaO_fb2int (int x);
+LUAI_FUNC int luaO_rawequalObj (const TValue *t1, const TValue *t2);
+LUAI_FUNC int luaO_str2d (const char *s, lua_Number *result);
+LUAI_FUNC const char *luaO_pushvfstring (lua_State *L, const char *fmt,
+ va_list argp);
+LUAI_FUNC const char *luaO_pushfstring (lua_State *L, const char *fmt, ...);
+LUAI_FUNC void luaO_chunkid (char *out, const char *source, size_t len);
+
+
+#endif
+
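Annotation (not part of the patch): the tagged-value macros above always pair a
Value union with an int tag, and the set/test macros keep the two in sync. A
minimal sketch, assuming it is compiled inside the Lua core with lobject.h
available:

    /* sketch: writing and reading a TValue through the macros above */
    TValue v;
    lua_Number n = 0;
    setnvalue(&v, 42.0);           /* v.value.n = 42.0, v.tt = LUA_TNUMBER */
    if (ttisnumber(&v))            /* ttype(&v) == LUA_TNUMBER */
      n = nvalue(&v);              /* check_exp guards the access in debug builds */
    (void)n;
    setnilvalue(&v);               /* only the tag changes; nil is not collectable,
                                      so no GC barrier or liveness check is needed */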
diff --git a/deps/lua/src/lopcodes.c b/deps/lua/src/lopcodes.c
new file mode 100644
index 0000000000000000000000000000000000000000..bf9cd522c260aa7a36ff76e19fc1a330d19b6e9c
--- /dev/null
+++ b/deps/lua/src/lopcodes.c
@@ -0,0 +1,102 @@
+/*
+** $Id: lopcodes.c,v 1.37 2005/11/08 19:45:36 roberto Exp $
+** See Copyright Notice in lua.h
+*/
+
+
+#define lopcodes_c
+#define LUA_CORE
+
+
+#include "lopcodes.h"
+
+
+/* ORDER OP */
+
+const char *const luaP_opnames[NUM_OPCODES+1] = {
+ "MOVE",
+ "LOADK",
+ "LOADBOOL",
+ "LOADNIL",
+ "GETUPVAL",
+ "GETGLOBAL",
+ "GETTABLE",
+ "SETGLOBAL",
+ "SETUPVAL",
+ "SETTABLE",
+ "NEWTABLE",
+ "SELF",
+ "ADD",
+ "SUB",
+ "MUL",
+ "DIV",
+ "MOD",
+ "POW",
+ "UNM",
+ "NOT",
+ "LEN",
+ "CONCAT",
+ "JMP",
+ "EQ",
+ "LT",
+ "LE",
+ "TEST",
+ "TESTSET",
+ "CALL",
+ "TAILCALL",
+ "RETURN",
+ "FORLOOP",
+ "FORPREP",
+ "TFORLOOP",
+ "SETLIST",
+ "CLOSE",
+ "CLOSURE",
+ "VARARG",
+ NULL
+};
+
+
+#define opmode(t,a,b,c,m) (((t)<<7) | ((a)<<6) | ((b)<<4) | ((c)<<2) | (m))
+
+const lu_byte luaP_opmodes[NUM_OPCODES] = {
+/* T A B C mode opcode */
+ opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_MOVE */
+ ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_LOADK */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_LOADBOOL */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LOADNIL */
+ ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_GETUPVAL */
+ ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_GETGLOBAL */
+ ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_GETTABLE */
+ ,opmode(0, 0, OpArgK, OpArgN, iABx) /* OP_SETGLOBAL */
+ ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_SETUPVAL */
+ ,opmode(0, 0, OpArgK, OpArgK, iABC) /* OP_SETTABLE */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_NEWTABLE */
+ ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_SELF */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_ADD */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_SUB */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MUL */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_DIV */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MOD */
+ ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_POW */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_UNM */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_NOT */
+ ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LEN */
+ ,opmode(0, 1, OpArgR, OpArgR, iABC) /* OP_CONCAT */
+ ,opmode(0, 0, OpArgR, OpArgN, iAsBx) /* OP_JMP */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_EQ */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LT */
+ ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LE */
+ ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TEST */
+ ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TESTSET */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_CALL */
+ ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_TAILCALL */
+ ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_RETURN */
+ ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORLOOP */
+ ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORPREP */
+ ,opmode(1, 0, OpArgN, OpArgU, iABC) /* OP_TFORLOOP */
+ ,opmode(0, 0, OpArgU, OpArgU, iABC) /* OP_SETLIST */
+ ,opmode(0, 0, OpArgN, OpArgN, iABC) /* OP_CLOSE */
+ ,opmode(0, 1, OpArgU, OpArgN, iABx) /* OP_CLOSURE */
+ ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_VARARG */
+};
+
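Annotation (not part of the patch): opmode() packs five properties of each opcode
into one byte; the accessor macros declared in lopcodes.h (next file) unpack them.
As a reality check, assuming <assert.h> and the core headers, the byte for OP_ADD
works out as follows:

    /* opmode(0, 1, OpArgK, OpArgK, iABC)                            */
    /*   = (0<<7) | (1<<6) | (OpArgK<<4) | (OpArgK<<2) | iABC        */
    /*   = 0x00   | 0x40   | 0x30        | 0x0C        | 0x00 = 0x7C */
    assert(getOpMode(OP_ADD) == iABC);    /* bits 0-1: instruction format */
    assert(getCMode(OP_ADD) == OpArgK);   /* bits 2-3: C is a register/constant */
    assert(getBMode(OP_ADD) == OpArgK);   /* bits 4-5: B is a register/constant */
    assert(testAMode(OP_ADD) != 0);       /* bit 6: the instruction sets R(A) */
    assert(testTMode(OP_ADD) == 0);       /* bit 7: not a test instruction */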
diff --git a/deps/lua/src/lopcodes.h b/deps/lua/src/lopcodes.h
new file mode 100644
index 0000000000000000000000000000000000000000..2834b1d74dadee7625025aeb6746bfdff404229f
--- /dev/null
+++ b/deps/lua/src/lopcodes.h
@@ -0,0 +1,268 @@
+/*
+** $Id: lopcodes.h,v 1.124 2005/12/02 18:42:08 roberto Exp $
+** Opcodes for Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lopcodes_h
+#define lopcodes_h
+
+#include "llimits.h"
+
+
+/*===========================================================================
+ We assume that instructions are unsigned numbers.
+ All instructions have an opcode in the first 6 bits.
+ Instructions can have the following fields:
+ `A' : 8 bits
+ `B' : 9 bits
+ `C' : 9 bits
+ `Bx' : 18 bits (`B' and `C' together)
+ `sBx' : signed Bx
+
+ A signed argument is represented in excess K; that is, the number
+ value is the unsigned value minus K. K is exactly the maximum value
+ for that argument (so that -max is represented by 0, and +max is
+ represented by 2*max), which is half the maximum for the corresponding
+ unsigned argument.
+===========================================================================*/
+
+
+enum OpMode {iABC, iABx, iAsBx}; /* basic instruction format */
+
+
+/*
+** size and position of opcode arguments.
+*/
+#define SIZE_C 9
+#define SIZE_B 9
+#define SIZE_Bx (SIZE_C + SIZE_B)
+#define SIZE_A 8
+
+#define SIZE_OP 6
+
+#define POS_OP 0
+#define POS_A (POS_OP + SIZE_OP)
+#define POS_C (POS_A + SIZE_A)
+#define POS_B (POS_C + SIZE_C)
+#define POS_Bx POS_C
+
+
+/*
+** limits for opcode arguments.
+** we use (signed) int to manipulate most arguments,
+** so they must fit in LUAI_BITSINT-1 bits (-1 for sign)
+*/
+#if SIZE_Bx < LUAI_BITSINT-1
+#define MAXARG_Bx        ((1<<SIZE_Bx)-1)
+#define MAXARG_sBx       (MAXARG_Bx>>1)         /* `sBx' is signed */
+#else
+#define MAXARG_Bx MAX_INT
+#define MAXARG_sBx MAX_INT
+#endif
+
+
+#define MAXARG_A        ((1<<SIZE_A)-1)
+#define MAXARG_B        ((1<<SIZE_B)-1)
+#define MAXARG_C        ((1<<SIZE_C)-1)
+
+
+/* creates a mask with `n' 1 bits at position `p' */
+#define MASK1(n,p)      ((~((~(Instruction)0)<<n))<<p)
+
+/* creates a mask with `n' 0 bits at position `p' */
+#define MASK0(n,p)      (~MASK1(n,p))
+
+/*
+** the following macros help to manipulate instructions
+*/
+
+#define GET_OPCODE(i)   (cast(OpCode, ((i)>>POS_OP) & MASK1(SIZE_OP,0)))
+#define SET_OPCODE(i,o) ((i) = (((i)&MASK0(SIZE_OP,POS_OP)) | \
+                ((cast(Instruction, o)<<POS_OP)&MASK1(SIZE_OP,POS_OP))))
+
+#define GETARG_A(i)     (cast(int, ((i)>>POS_A) & MASK1(SIZE_A,0)))
+#define SETARG_A(i,u)   ((i) = (((i)&MASK0(SIZE_A,POS_A)) | \
+                ((cast(Instruction, u)<<POS_A)&MASK1(SIZE_A,POS_A))))
+
+#define GETARG_B(i)     (cast(int, ((i)>>POS_B) & MASK1(SIZE_B,0)))
+#define SETARG_B(i,b)   ((i) = (((i)&MASK0(SIZE_B,POS_B)) | \
+                ((cast(Instruction, b)<<POS_B)&MASK1(SIZE_B,POS_B))))
+
+#define GETARG_C(i)     (cast(int, ((i)>>POS_C) & MASK1(SIZE_C,0)))
+#define SETARG_C(i,b)   ((i) = (((i)&MASK0(SIZE_C,POS_C)) | \
+                ((cast(Instruction, b)<<POS_C)&MASK1(SIZE_C,POS_C))))
+
+#define GETARG_Bx(i)    (cast(int, ((i)>>POS_Bx) & MASK1(SIZE_Bx,0)))
+#define SETARG_Bx(i,b)  ((i) = (((i)&MASK0(SIZE_Bx,POS_Bx)) | \
+                ((cast(Instruction, b)<<POS_Bx)&MASK1(SIZE_Bx,POS_Bx))))
+
+#define GETARG_sBx(i)   (GETARG_Bx(i)-MAXARG_sBx)
+#define SETARG_sBx(i,b) SETARG_Bx((i),cast(unsigned int, (b)+MAXARG_sBx))
+
+
+#define CREATE_ABC(o,a,b,c)     ((cast(Instruction, o)<<POS_OP) \
+                        | (cast(Instruction, a)<<POS_A) \
+                        | (cast(Instruction, b)<<POS_B) \
+                        | (cast(Instruction, c)<<POS_C))
+
+#define CREATE_ABx(o,a,bc)      ((cast(Instruction, o)<<POS_OP) \
+                        | (cast(Instruction, a)<<POS_A) \
+                        | (cast(Instruction, bc)<<POS_Bx))
+
+
+/* Macros to operate RK indices */
+
+/* this bit 1 means constant (0 means register) */
+#define BITRK           (1 << (SIZE_B - 1))
+
+/* test whether value is a constant */
+#define ISK(x)          ((x) & BITRK)
+
+/* gets the index of the constant */
+#define INDEXK(r)       ((int)(r) & ~BITRK)
+
+#define MAXINDEXRK      (BITRK - 1)
+
+/* code a constant index as a RK value */
+#define RKASK(x)        ((x) | BITRK)
+
+
+/*
+** invalid register that fits in 8 bits
+*/
+#define NO_REG          MAXARG_A
+
+
+/*
+** R(x) - register
+** Kst(x) - constant (in constant table)
+** RK(x) == if ISK(x) then Kst(INDEXK(x)) else R(x)
+*/
+
+
+/*
+** grep "ORDER OP" if you change these enums
+*/
+
+typedef enum {
+/*----------------------------------------------------------------------
+name            args    description
+------------------------------------------------------------------------*/
+OP_MOVE,/*      A B     R(A) := R(B)                                    */
+OP_LOADK,/*     A Bx    R(A) := Kst(Bx)                                 */
+OP_LOADBOOL,/*  A B C   R(A) := (Bool)B; if (C) pc++                    */
+OP_LOADNIL,/*   A B     R(A) := ... := R(B) := nil                      */
+OP_GETUPVAL,/*  A B     R(A) := UpValue[B]                              */
+
+OP_GETGLOBAL,/* A Bx    R(A) := Gbl[Kst(Bx)]                            */
+OP_GETTABLE,/*  A B C   R(A) := R(B)[RK(C)]                             */
+
+OP_SETGLOBAL,/* A Bx    Gbl[Kst(Bx)] := R(A)                            */
+OP_SETUPVAL,/*  A B     UpValue[B] := R(A)                              */
+OP_SETTABLE,/*  A B C   R(A)[RK(B)] := RK(C)                            */
+
+OP_NEWTABLE,/*  A B C   R(A) := {} (size = B,C)                         */
+
+OP_SELF,/*      A B C   R(A+1) := R(B); R(A) := R(B)[RK(C)]             */
+
+OP_ADD,/*       A B C   R(A) := RK(B) + RK(C)                           */
+OP_SUB,/*       A B C   R(A) := RK(B) - RK(C)                           */
+OP_MUL,/*       A B C   R(A) := RK(B) * RK(C)                           */
+OP_DIV,/*       A B C   R(A) := RK(B) / RK(C)                           */
+OP_MOD,/*       A B C   R(A) := RK(B) % RK(C)                           */
+OP_POW,/*       A B C   R(A) := RK(B) ^ RK(C)                           */
+OP_UNM,/*       A B     R(A) := -R(B)                                   */
+OP_NOT,/*       A B     R(A) := not R(B)                                */
+OP_LEN,/*       A B     R(A) := length of R(B)                          */
+
+OP_CONCAT,/*    A B C   R(A) := R(B).. ... ..R(C)                       */
+
+OP_JMP,/*       sBx     pc+=sBx                                         */
+
+OP_EQ,/*        A B C   if ((RK(B) == RK(C)) ~= A) then pc++            */
+OP_LT,/*        A B C   if ((RK(B) <  RK(C)) ~= A) then pc++            */
+OP_LE,/*        A B C   if ((RK(B) <= RK(C)) ~= A) then pc++            */
+
+OP_TEST,/*      A C     if not (R(A) <=> C) then pc++                   */
+OP_TESTSET,/* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */
+
+OP_CALL,/* A B C R(A), ... ,R(A+C-2) := R(A)(R(A+1), ... ,R(A+B-1)) */
+OP_TAILCALL,/* A B C return R(A)(R(A+1), ... ,R(A+B-1)) */
+OP_RETURN,/* A B return R(A), ... ,R(A+B-2) (see note) */
+
+OP_FORLOOP,/* A sBx R(A)+=R(A+2);
+                        if R(A) <?= R(A+1) then { pc+=sBx; R(A+3)=R(A) }*/
+OP_FORPREP,/* A sBx R(A)-=R(A+2); pc+=sBx */
+
+OP_TFORLOOP,/* A C R(A+3), ... ,R(A+3+C) := R(A)(R(A+1), R(A+2));
+ if R(A+3) ~= nil then { pc++; R(A+2)=R(A+3); } */
+OP_SETLIST,/* A B C R(A)[(C-1)*FPF+i] := R(A+i), 1 <= i <= B */
+
+OP_CLOSE,/* A close all variables in the stack up to (>=) R(A)*/
+OP_CLOSURE,/* A Bx R(A) := closure(KPROTO[Bx], R(A), ... ,R(A+n)) */
+
+OP_VARARG/* A B R(A), R(A+1), ..., R(A+B-1) = vararg */
+} OpCode;
+
+
+#define NUM_OPCODES (cast(int, OP_VARARG) + 1)
+
+
+
+/*===========================================================================
+ Notes:
+ (*) In OP_CALL, if (B == 0) then B = top. C is the number of returns - 1,
+ and can be 0: OP_CALL then sets `top' to last_result+1, so
+ next open instruction (OP_CALL, OP_RETURN, OP_SETLIST) may use `top'.
+
+ (*) In OP_VARARG, if (B == 0) then use actual number of varargs and
+ set top (like in OP_CALL with C == 0).
+
+ (*) In OP_RETURN, if (B == 0) then return up to `top'
+
+ (*) In OP_SETLIST, if (B == 0) then B = `top';
+ if (C == 0) then next `instruction' is real C
+
+ (*) For comparisons, A specifies what condition the test should accept
+ (true or false).
+
+ (*) All `skips' (pc++) assume that next instruction is a jump
+===========================================================================*/
+
+
+/*
+** masks for instruction properties. The format is:
+** bits 0-1: op mode
+** bits 2-3: C arg mode
+** bits 4-5: B arg mode
+** bit 6: instruction set register A
+** bit 7: operator is a test
+*/
+
+enum OpArgMask {
+ OpArgN, /* argument is not used */
+ OpArgU, /* argument is used */
+ OpArgR, /* argument is a register or a jump offset */
+ OpArgK /* argument is a constant or register/constant */
+};
+
+LUAI_DATA const lu_byte luaP_opmodes[NUM_OPCODES];
+
+#define getOpMode(m) (cast(enum OpMode, luaP_opmodes[m] & 3))
+#define getBMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 4) & 3))
+#define getCMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 2) & 3))
+#define testAMode(m) (luaP_opmodes[m] & (1 << 6))
+#define testTMode(m) (luaP_opmodes[m] & (1 << 7))
+
+
+LUAI_DATA const char *const luaP_opnames[NUM_OPCODES+1]; /* opcode names */
+
+
+/* number of list items to accumulate before a SETLIST instruction */
+#define LFIELDS_PER_FLUSH 50
+
+
+#endif
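Annotation (not part of the patch): the header comment above describes the
excess-K encoding of signed arguments. A small sketch, assuming the core headers
above plus <assert.h>, of how a backward jump is built and decoded:

    /* a jump of -3 in iAsBx format is stored biased by MAXARG_sBx,  */
    /* so the raw Bx field holds MAXARG_sBx - 3                      */
    Instruction i = CREATE_ABx(OP_JMP, 0, cast(unsigned int, -3 + MAXARG_sBx));
    assert(GET_OPCODE(i) == OP_JMP);
    assert(GETARG_A(i) == 0);
    assert(GETARG_sBx(i) == -3);          /* GETARG_Bx(i) - MAXARG_sBx */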
diff --git a/deps/lua/src/loslib.c b/deps/lua/src/loslib.c
new file mode 100644
index 0000000000000000000000000000000000000000..509d7b72e49dffc89dc4f62417d9350d0a7d5c8a
--- /dev/null
+++ b/deps/lua/src/loslib.c
@@ -0,0 +1,238 @@
+/*
+** $Id: loslib.c,v 1.17 2006/01/27 13:54:31 roberto Exp $
+** Standard Operating System library
+** See Copyright Notice in lua.h
+*/
+
+
+#include <errno.h>
+#include <locale.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#define loslib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+static int os_pushresult (lua_State *L, int i, const char *filename) {
+ int en = errno; /* calls to Lua API may change this value */
+ if (i) {
+ lua_pushboolean(L, 1);
+ return 1;
+ }
+ else {
+ lua_pushnil(L);
+ if (filename)
+ lua_pushfstring(L, "%s: %s", filename, strerror(en));
+ else
+ lua_pushfstring(L, "%s", strerror(en));
+ lua_pushinteger(L, en);
+ return 3;
+ }
+}
+
+
+static int os_execute (lua_State *L) {
+ lua_pushinteger(L, system(luaL_optstring(L, 1, NULL)));
+ return 1;
+}
+
+
+static int os_remove (lua_State *L) {
+ const char *filename = luaL_checkstring(L, 1);
+ return os_pushresult(L, remove(filename) == 0, filename);
+}
+
+
+static int os_rename (lua_State *L) {
+ const char *fromname = luaL_checkstring(L, 1);
+ const char *toname = luaL_checkstring(L, 2);
+ return os_pushresult(L, rename(fromname, toname) == 0, fromname);
+}
+
+
+static int os_tmpname (lua_State *L) {
+ char buff[LUA_TMPNAMBUFSIZE];
+ int err;
+ lua_tmpnam(buff, err);
+ if (err)
+ return luaL_error(L, "unable to generate a unique filename");
+ lua_pushstring(L, buff);
+ return 1;
+}
+
+
+static int os_getenv (lua_State *L) {
+ lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */
+ return 1;
+}
+
+
+static int os_clock (lua_State *L) {
+ lua_pushnumber(L, ((lua_Number)clock())/(lua_Number)CLOCKS_PER_SEC);
+ return 1;
+}
+
+
+/*
+** {======================================================
+** Time/Date operations
+** { year=%Y, month=%m, day=%d, hour=%H, min=%M, sec=%S,
+** wday=%w+1, yday=%j, isdst=? }
+** =======================================================
+*/
+
+static void setfield (lua_State *L, const char *key, int value) {
+ lua_pushinteger(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setboolfield (lua_State *L, const char *key, int value) {
+ if (value < 0) /* undefined? */
+ return; /* does not set field */
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int getboolfield (lua_State *L, const char *key) {
+ int res;
+ lua_getfield(L, -1, key);
+ res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+}
+
+
+static int getfield (lua_State *L, const char *key, int d) {
+ int res;
+ lua_getfield(L, -1, key);
+ if (lua_isnumber(L, -1))
+ res = (int)lua_tointeger(L, -1);
+ else {
+ if (d < 0)
+ return luaL_error(L, "field " LUA_QS " missing in date table", key);
+ res = d;
+ }
+ lua_pop(L, 1);
+ return res;
+}
+
+
+static int os_date (lua_State *L) {
+ const char *s = luaL_optstring(L, 1, "%c");
+ time_t t = lua_isnoneornil(L, 2) ? time(NULL) :
+ (time_t)luaL_checknumber(L, 2);
+ struct tm *stm;
+ if (*s == '!') { /* UTC? */
+ stm = gmtime(&t);
+ s++; /* skip `!' */
+ }
+ else
+ stm = localtime(&t);
+ if (stm == NULL) /* invalid date? */
+ lua_pushnil(L);
+ else if (strcmp(s, "*t") == 0) {
+ lua_createtable(L, 0, 9); /* 9 = number of fields */
+ setfield(L, "sec", stm->tm_sec);
+ setfield(L, "min", stm->tm_min);
+ setfield(L, "hour", stm->tm_hour);
+ setfield(L, "day", stm->tm_mday);
+ setfield(L, "month", stm->tm_mon+1);
+ setfield(L, "year", stm->tm_year+1900);
+ setfield(L, "wday", stm->tm_wday+1);
+ setfield(L, "yday", stm->tm_yday+1);
+ setboolfield(L, "isdst", stm->tm_isdst);
+ }
+ else {
+ char b[256];
+ if (strftime(b, sizeof(b), s, stm))
+ lua_pushstring(L, b);
+ else
+ return luaL_error(L, LUA_QL("date") " format too long");
+ }
+ return 1;
+}
+
+
+static int os_time (lua_State *L) {
+ time_t t;
+ if (lua_isnoneornil(L, 1)) /* called without args? */
+ t = time(NULL); /* get current time */
+ else {
+ struct tm ts;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_settop(L, 1); /* make sure table is at the top */
+ ts.tm_sec = getfield(L, "sec", 0);
+ ts.tm_min = getfield(L, "min", 0);
+ ts.tm_hour = getfield(L, "hour", 12);
+ ts.tm_mday = getfield(L, "day", -1);
+ ts.tm_mon = getfield(L, "month", -1) - 1;
+ ts.tm_year = getfield(L, "year", -1) - 1900;
+ ts.tm_isdst = getboolfield(L, "isdst");
+ t = mktime(&ts);
+ }
+ if (t == (time_t)(-1))
+ lua_pushnil(L);
+ else
+ lua_pushnumber(L, (lua_Number)t);
+ return 1;
+}
+
+
+static int os_difftime (lua_State *L) {
+ lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)),
+ (time_t)(luaL_optnumber(L, 2, 0))));
+ return 1;
+}
+
+/* }====================================================== */
+
+
+static int os_setlocale (lua_State *L) {
+ static const int cat[] = {LC_ALL, LC_COLLATE, LC_CTYPE, LC_MONETARY,
+ LC_NUMERIC, LC_TIME};
+ static const char *const catnames[] = {"all", "collate", "ctype", "monetary",
+ "numeric", "time", NULL};
+ const char *l = lua_tostring(L, 1);
+ int op = luaL_checkoption(L, 2, "all", catnames);
+ luaL_argcheck(L, l || lua_isnoneornil(L, 1), 1, "string expected");
+ lua_pushstring(L, setlocale(cat[op], l));
+ return 1;
+}
+
+
+static int os_exit (lua_State *L) {
+ exit(luaL_optint(L, 1, EXIT_SUCCESS));
+ return 0; /* to avoid warnings */
+}
+
+static const luaL_Reg syslib[] = {
+ {"clock", os_clock},
+ {"date", os_date},
+ {"difftime", os_difftime},
+ {"execute", os_execute},
+ {"exit", os_exit},
+ {"getenv", os_getenv},
+ {"remove", os_remove},
+ {"rename", os_rename},
+ {"setlocale", os_setlocale},
+ {"time", os_time},
+ {"tmpname", os_tmpname},
+ {NULL, NULL}
+};
+
+/* }====================================================== */
+
+
+
+LUALIB_API int luaopen_os (lua_State *L) {
+ luaL_register(L, LUA_OSLIBNAME, syslib);
+ return 1;
+}
+
diff --git a/deps/lua/src/lparser.c b/deps/lua/src/lparser.c
new file mode 100644
index 0000000000000000000000000000000000000000..b40ee794fe785d60578b49c8471e23c590703126
--- /dev/null
+++ b/deps/lua/src/lparser.c
@@ -0,0 +1,1336 @@
+/*
+** $Id: lparser.c,v 2.40 2005/12/22 16:19:56 roberto Exp $
+** Lua Parser
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lparser_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lcode.h"
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "llex.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lparser.h"
+#include "lstate.h"
+#include "lstring.h"
+
+
+
+
+#define hasmultret(k) ((k) == VCALL || (k) == VVARARG)
+
+#define getlocvar(fs, i) ((fs)->f->locvars[(fs)->actvar[i]])
+
+#define luaY_checklimit(fs,v,l,m) if ((v)>(l)) errorlimit(fs,l,m)
+
+
+/*
+** nodes for block list (list of active blocks)
+*/
+typedef struct BlockCnt {
+ struct BlockCnt *previous; /* chain */
+ int breaklist; /* list of jumps out of this loop */
+ lu_byte nactvar; /* # active locals outside the breakable structure */
+ lu_byte upval; /* true if some variable in the block is an upvalue */
+ lu_byte isbreakable; /* true if `block' is a loop */
+} BlockCnt;
+
+
+
+/*
+** prototypes for recursive non-terminal functions
+*/
+static void chunk (LexState *ls);
+static void expr (LexState *ls, expdesc *v);
+
+
+static void anchor_token (LexState *ls) {
+ if (ls->t.token == TK_NAME || ls->t.token == TK_STRING) {
+ TString *ts = ls->t.seminfo.ts;
+ luaX_newstring(ls, getstr(ts), ts->tsv.len);
+ }
+}
+
+
+static void error_expected (LexState *ls, int token) {
+ luaX_syntaxerror(ls,
+ luaO_pushfstring(ls->L, LUA_QS " expected", luaX_token2str(ls, token)));
+}
+
+
+static void errorlimit (FuncState *fs, int limit, const char *what) {
+ const char *msg = (fs->f->linedefined == 0) ?
+ luaO_pushfstring(fs->L, "main function has more than %d %s", limit, what) :
+ luaO_pushfstring(fs->L, "function at line %d has more than %d %s",
+ fs->f->linedefined, limit, what);
+ luaX_lexerror(fs->ls, msg, 0);
+}
+
+
+static int testnext (LexState *ls, int c) {
+ if (ls->t.token == c) {
+ luaX_next(ls);
+ return 1;
+ }
+ else return 0;
+}
+
+
+static void check (LexState *ls, int c) {
+ if (ls->t.token != c)
+ error_expected(ls, c);
+}
+
+static void checknext (LexState *ls, int c) {
+ check(ls, c);
+ luaX_next(ls);
+}
+
+
+#define check_condition(ls,c,msg) { if (!(c)) luaX_syntaxerror(ls, msg); }
+
+
+
+static void check_match (LexState *ls, int what, int who, int where) {
+ if (!testnext(ls, what)) {
+ if (where == ls->linenumber)
+ error_expected(ls, what);
+ else {
+ luaX_syntaxerror(ls, luaO_pushfstring(ls->L,
+ LUA_QS " expected (to close " LUA_QS " at line %d)",
+ luaX_token2str(ls, what), luaX_token2str(ls, who), where));
+ }
+ }
+}
+
+
+static TString *str_checkname (LexState *ls) {
+ TString *ts;
+ check(ls, TK_NAME);
+ ts = ls->t.seminfo.ts;
+ luaX_next(ls);
+ return ts;
+}
+
+
+static void init_exp (expdesc *e, expkind k, int i) {
+ e->f = e->t = NO_JUMP;
+ e->k = k;
+ e->u.s.info = i;
+}
+
+
+static void codestring (LexState *ls, expdesc *e, TString *s) {
+ init_exp(e, VK, luaK_stringK(ls->fs, s));
+}
+
+
+static void checkname(LexState *ls, expdesc *e) {
+ codestring(ls, e, str_checkname(ls));
+}
+
+
+static int registerlocalvar (LexState *ls, TString *varname) {
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int oldsize = f->sizelocvars;
+ luaM_growvector(ls->L, f->locvars, fs->nlocvars, f->sizelocvars,
+ LocVar, SHRT_MAX, "too many local variables");
+ while (oldsize < f->sizelocvars) f->locvars[oldsize++].varname = NULL;
+ f->locvars[fs->nlocvars].varname = varname;
+ luaC_objbarrier(ls->L, f, varname);
+ return fs->nlocvars++;
+}
+
+
+#define new_localvarliteral(ls,v,n) \
+ new_localvar(ls, luaX_newstring(ls, "" v, (sizeof(v)/sizeof(char))-1), n)
+
+
+static void new_localvar (LexState *ls, TString *name, int n) {
+ FuncState *fs = ls->fs;
+ luaY_checklimit(fs, fs->nactvar+n+1, LUAI_MAXVARS, "local variables");
+ fs->actvar[fs->nactvar+n] = cast(unsigned short, registerlocalvar(ls, name));
+}
+
+
+static void adjustlocalvars (LexState *ls, int nvars) {
+ FuncState *fs = ls->fs;
+ fs->nactvar = cast_byte(fs->nactvar + nvars);
+ for (; nvars; nvars--) {
+ getlocvar(fs, fs->nactvar - nvars).startpc = fs->pc;
+ }
+}
+
+
+static void removevars (LexState *ls, int tolevel) {
+ FuncState *fs = ls->fs;
+ while (fs->nactvar > tolevel)
+ getlocvar(fs, --fs->nactvar).endpc = fs->pc;
+}
+
+
+static int indexupvalue (FuncState *fs, TString *name, expdesc *v) {
+ int i;
+ Proto *f = fs->f;
+ int oldsize = f->sizeupvalues;
+  for (i=0; i<f->nups; i++) {
+ if (fs->upvalues[i].k == v->k && fs->upvalues[i].info == v->u.s.info) {
+ lua_assert(f->upvalues[i] == name);
+ return i;
+ }
+ }
+ /* new one */
+ luaY_checklimit(fs, f->nups + 1, LUAI_MAXUPVALUES, "upvalues");
+ luaM_growvector(fs->L, f->upvalues, f->nups, f->sizeupvalues,
+ TString *, MAX_INT, "");
+ while (oldsize < f->sizeupvalues) f->upvalues[oldsize++] = NULL;
+ f->upvalues[f->nups] = name;
+ luaC_objbarrier(fs->L, f, name);
+ lua_assert(v->k == VLOCAL || v->k == VUPVAL);
+ fs->upvalues[f->nups].k = cast_byte(v->k);
+ fs->upvalues[f->nups].info = cast_byte(v->u.s.info);
+ return f->nups++;
+}
+
+
+static int searchvar (FuncState *fs, TString *n) {
+ int i;
+ for (i=fs->nactvar-1; i >= 0; i--) {
+ if (n == getlocvar(fs, i).varname)
+ return i;
+ }
+ return -1; /* not found */
+}
+
+
+static void markupval (FuncState *fs, int level) {
+ BlockCnt *bl = fs->bl;
+ while (bl && bl->nactvar > level) bl = bl->previous;
+ if (bl) bl->upval = 1;
+}
+
+
+static int singlevaraux (FuncState *fs, TString *n, expdesc *var, int base) {
+ if (fs == NULL) { /* no more levels? */
+ init_exp(var, VGLOBAL, NO_REG); /* default is global variable */
+ return VGLOBAL;
+ }
+ else {
+ int v = searchvar(fs, n); /* look up at current level */
+ if (v >= 0) {
+ init_exp(var, VLOCAL, v);
+ if (!base)
+ markupval(fs, v); /* local will be used as an upval */
+ return VLOCAL;
+ }
+ else { /* not found at current level; try upper one */
+ if (singlevaraux(fs->prev, n, var, 0) == VGLOBAL)
+ return VGLOBAL;
+ var->u.s.info = indexupvalue(fs, n, var); /* else was LOCAL or UPVAL */
+ var->k = VUPVAL; /* upvalue in this level */
+ return VUPVAL;
+ }
+ }
+}
+
+
+static void singlevar (LexState *ls, expdesc *var) {
+ TString *varname = str_checkname(ls);
+ FuncState *fs = ls->fs;
+ if (singlevaraux(fs, varname, var, 1) == VGLOBAL)
+ var->u.s.info = luaK_stringK(fs, varname); /* info points to global name */
+}
+
+
+static void adjust_assign (LexState *ls, int nvars, int nexps, expdesc *e) {
+ FuncState *fs = ls->fs;
+ int extra = nvars - nexps;
+ if (hasmultret(e->k)) {
+ extra++; /* includes call itself */
+ if (extra < 0) extra = 0;
+ luaK_setreturns(fs, e, extra); /* last exp. provides the difference */
+ if (extra > 1) luaK_reserveregs(fs, extra-1);
+ }
+ else {
+ if (e->k != VVOID) luaK_exp2nextreg(fs, e); /* close last expression */
+ if (extra > 0) {
+ int reg = fs->freereg;
+ luaK_reserveregs(fs, extra);
+ luaK_nil(fs, reg, extra);
+ }
+ }
+}
+
+
+static void enterlevel (LexState *ls) {
+ if (++ls->L->nCcalls > LUAI_MAXCCALLS)
+ luaX_lexerror(ls, "chunk has too many syntax levels", 0);
+}
+
+
+#define leavelevel(ls) ((ls)->L->nCcalls--)
+
+
+static void enterblock (FuncState *fs, BlockCnt *bl, lu_byte isbreakable) {
+ bl->breaklist = NO_JUMP;
+ bl->isbreakable = isbreakable;
+ bl->nactvar = fs->nactvar;
+ bl->upval = 0;
+ bl->previous = fs->bl;
+ fs->bl = bl;
+ lua_assert(fs->freereg == fs->nactvar);
+}
+
+
+static void leaveblock (FuncState *fs) {
+ BlockCnt *bl = fs->bl;
+ fs->bl = bl->previous;
+ removevars(fs->ls, bl->nactvar);
+ if (bl->upval)
+ luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0);
+ lua_assert(!bl->isbreakable || !bl->upval); /* loops have no body */
+ lua_assert(bl->nactvar == fs->nactvar);
+ fs->freereg = fs->nactvar; /* free registers */
+ luaK_patchtohere(fs, bl->breaklist);
+}
+
+
+static void pushclosure (LexState *ls, FuncState *func, expdesc *v) {
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int oldsize = f->sizep;
+ int i;
+ luaM_growvector(ls->L, f->p, fs->np, f->sizep, Proto *,
+ MAXARG_Bx, "constant table overflow");
+ while (oldsize < f->sizep) f->p[oldsize++] = NULL;
+ f->p[fs->np++] = func->f;
+ luaC_objbarrier(ls->L, f, func->f);
+ init_exp(v, VRELOCABLE, luaK_codeABx(fs, OP_CLOSURE, 0, fs->np-1));
+  for (i=0; i<func->f->nups; i++) {
+ OpCode o = (func->upvalues[i].k == VLOCAL) ? OP_MOVE : OP_GETUPVAL;
+ luaK_codeABC(fs, o, 0, func->upvalues[i].info, 0);
+ }
+}
+
+
+static void open_func (LexState *ls, FuncState *fs) {
+ lua_State *L = ls->L;
+ Proto *f = luaF_newproto(L);
+ fs->f = f;
+ fs->prev = ls->fs; /* linked list of funcstates */
+ fs->ls = ls;
+ fs->L = L;
+ ls->fs = fs;
+ fs->pc = 0;
+ fs->lasttarget = -1;
+ fs->jpc = NO_JUMP;
+ fs->freereg = 0;
+ fs->nk = 0;
+ fs->np = 0;
+ fs->nlocvars = 0;
+ fs->nactvar = 0;
+ fs->bl = NULL;
+ f->source = ls->source;
+ f->maxstacksize = 2; /* registers 0/1 are always valid */
+ fs->h = luaH_new(L, 0, 0);
+ /* anchor table of constants and prototype (to avoid being collected) */
+ sethvalue2s(L, L->top, fs->h);
+ incr_top(L);
+ setptvalue2s(L, L->top, f);
+ incr_top(L);
+}
+
+
+static void close_func (LexState *ls) {
+ lua_State *L = ls->L;
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ removevars(ls, 0);
+ luaK_ret(fs, 0, 0); /* final return */
+ luaM_reallocvector(L, f->code, f->sizecode, fs->pc, Instruction);
+ f->sizecode = fs->pc;
+ luaM_reallocvector(L, f->lineinfo, f->sizelineinfo, fs->pc, int);
+ f->sizelineinfo = fs->pc;
+ luaM_reallocvector(L, f->k, f->sizek, fs->nk, TValue);
+ f->sizek = fs->nk;
+ luaM_reallocvector(L, f->p, f->sizep, fs->np, Proto *);
+ f->sizep = fs->np;
+ luaM_reallocvector(L, f->locvars, f->sizelocvars, fs->nlocvars, LocVar);
+ f->sizelocvars = fs->nlocvars;
+ luaM_reallocvector(L, f->upvalues, f->sizeupvalues, f->nups, TString *);
+ f->sizeupvalues = f->nups;
+ lua_assert(luaG_checkcode(f));
+ lua_assert(fs->bl == NULL);
+ ls->fs = fs->prev;
+ L->top -= 2; /* remove table and prototype from the stack */
+ /* last token read was anchored in defunct function; must reanchor it */
+ if (fs) anchor_token(ls);
+}
+
+
+Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) {
+ struct LexState lexstate;
+ struct FuncState funcstate;
+ lexstate.buff = buff;
+ luaX_setinput(L, &lexstate, z, luaS_new(L, name));
+ open_func(&lexstate, &funcstate);
+ funcstate.f->is_vararg = VARARG_ISVARARG; /* main func. is always vararg */
+ luaX_next(&lexstate); /* read first token */
+ chunk(&lexstate);
+ check(&lexstate, TK_EOS);
+ close_func(&lexstate);
+ lua_assert(funcstate.prev == NULL);
+ lua_assert(funcstate.f->nups == 0);
+ lua_assert(lexstate.fs == NULL);
+ return funcstate.f;
+}
+
+
+
+/*============================================================*/
+/* GRAMMAR RULES */
+/*============================================================*/
+
+
+static void field (LexState *ls, expdesc *v) {
+ /* field -> ['.' | ':'] NAME */
+ FuncState *fs = ls->fs;
+ expdesc key;
+ luaK_exp2anyreg(fs, v);
+ luaX_next(ls); /* skip the dot or colon */
+ checkname(ls, &key);
+ luaK_indexed(fs, v, &key);
+}
+
+
+static void yindex (LexState *ls, expdesc *v) {
+ /* index -> '[' expr ']' */
+ luaX_next(ls); /* skip the '[' */
+ expr(ls, v);
+ luaK_exp2val(ls->fs, v);
+ checknext(ls, ']');
+}
+
+
+/*
+** {======================================================================
+** Rules for Constructors
+** =======================================================================
+*/
+
+
+struct ConsControl {
+ expdesc v; /* last list item read */
+ expdesc *t; /* table descriptor */
+ int nh; /* total number of `record' elements */
+ int na; /* total number of array elements */
+ int tostore; /* number of array elements pending to be stored */
+};
+
+
+static void recfield (LexState *ls, struct ConsControl *cc) {
+ /* recfield -> (NAME | `['exp1`]') = exp1 */
+ FuncState *fs = ls->fs;
+ int reg = ls->fs->freereg;
+ expdesc key, val;
+ if (ls->t.token == TK_NAME) {
+ luaY_checklimit(fs, cc->nh, MAX_INT, "items in a constructor");
+ checkname(ls, &key);
+ }
+ else /* ls->t.token == '[' */
+ yindex(ls, &key);
+ cc->nh++;
+ checknext(ls, '=');
+ luaK_exp2RK(fs, &key);
+ expr(ls, &val);
+ luaK_codeABC(fs, OP_SETTABLE, cc->t->u.s.info, luaK_exp2RK(fs, &key),
+ luaK_exp2RK(fs, &val));
+ fs->freereg = reg; /* free registers */
+}
+
+
+static void closelistfield (FuncState *fs, struct ConsControl *cc) {
+ if (cc->v.k == VVOID) return; /* there is no list item */
+ luaK_exp2nextreg(fs, &cc->v);
+ cc->v.k = VVOID;
+ if (cc->tostore == LFIELDS_PER_FLUSH) {
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); /* flush */
+ cc->tostore = 0; /* no more items pending */
+ }
+}
+
+
+static void lastlistfield (FuncState *fs, struct ConsControl *cc) {
+ if (cc->tostore == 0) return;
+ if (hasmultret(cc->v.k)) {
+ luaK_setmultret(fs, &cc->v);
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, LUA_MULTRET);
+ cc->na--; /* do not count last expression (unknown number of elements) */
+ }
+ else {
+ if (cc->v.k != VVOID)
+ luaK_exp2nextreg(fs, &cc->v);
+ luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore);
+ }
+}
+
+
+static void listfield (LexState *ls, struct ConsControl *cc) {
+ expr(ls, &cc->v);
+ luaY_checklimit(ls->fs, cc->na, MAXARG_Bx, "items in a constructor");
+ cc->na++;
+ cc->tostore++;
+}
+
+
+static void constructor (LexState *ls, expdesc *t) {
+ /* constructor -> ?? */
+ FuncState *fs = ls->fs;
+ int line = ls->linenumber;
+ int pc = luaK_codeABC(fs, OP_NEWTABLE, 0, 0, 0);
+ struct ConsControl cc;
+ cc.na = cc.nh = cc.tostore = 0;
+ cc.t = t;
+ init_exp(t, VRELOCABLE, pc);
+ init_exp(&cc.v, VVOID, 0); /* no value (yet) */
+ luaK_exp2nextreg(ls->fs, t); /* fix it at stack top (for gc) */
+ checknext(ls, '{');
+ do {
+ lua_assert(cc.v.k == VVOID || cc.tostore > 0);
+ if (ls->t.token == '}') break;
+ closelistfield(fs, &cc);
+ switch(ls->t.token) {
+ case TK_NAME: { /* may be listfields or recfields */
+ luaX_lookahead(ls);
+ if (ls->lookahead.token != '=') /* expression? */
+ listfield(ls, &cc);
+ else
+ recfield(ls, &cc);
+ break;
+ }
+ case '[': { /* constructor_item -> recfield */
+ recfield(ls, &cc);
+ break;
+ }
+ default: { /* constructor_part -> listfield */
+ listfield(ls, &cc);
+ break;
+ }
+ }
+ } while (testnext(ls, ',') || testnext(ls, ';'));
+ check_match(ls, '}', '{', line);
+ lastlistfield(fs, &cc);
+ SETARG_B(fs->f->code[pc], luaO_int2fb(cc.na)); /* set initial array size */
+ SETARG_C(fs->f->code[pc], luaO_int2fb(cc.nh)); /* set initial table size */
+}
+
+/* }====================================================================== */
+
+
+
+static void parlist (LexState *ls) {
+ /* parlist -> [ param { `,' param } ] */
+ FuncState *fs = ls->fs;
+ Proto *f = fs->f;
+ int nparams = 0;
+ f->is_vararg = 0;
+ if (ls->t.token != ')') { /* is `parlist' not empty? */
+ do {
+ switch (ls->t.token) {
+ case TK_NAME: { /* param -> NAME */
+ new_localvar(ls, str_checkname(ls), nparams++);
+ break;
+ }
+ case TK_DOTS: { /* param -> `...' */
+ luaX_next(ls);
+#if defined(LUA_COMPAT_VARARG)
+ /* use `arg' as default name */
+ new_localvarliteral(ls, "arg", nparams++);
+ f->is_vararg = VARARG_HASARG | VARARG_NEEDSARG;
+#endif
+ f->is_vararg |= VARARG_ISVARARG;
+ break;
+ }
+        default: luaX_syntaxerror(ls, "<name> or " LUA_QL("...") " expected");
+ }
+ } while (!f->is_vararg && testnext(ls, ','));
+ }
+ adjustlocalvars(ls, nparams);
+ f->numparams = cast_byte(fs->nactvar - (f->is_vararg & VARARG_HASARG));
+ luaK_reserveregs(fs, fs->nactvar); /* reserve register for parameters */
+}
+
+
+static void body (LexState *ls, expdesc *e, int needself, int line) {
+ /* body -> `(' parlist `)' chunk END */
+ FuncState new_fs;
+ open_func(ls, &new_fs);
+ new_fs.f->linedefined = line;
+ checknext(ls, '(');
+ if (needself) {
+ new_localvarliteral(ls, "self", 0);
+ adjustlocalvars(ls, 1);
+ }
+ parlist(ls);
+ checknext(ls, ')');
+ chunk(ls);
+ new_fs.f->lastlinedefined = ls->linenumber;
+ check_match(ls, TK_END, TK_FUNCTION, line);
+ close_func(ls);
+ pushclosure(ls, &new_fs, e);
+}
+
+
+static int explist1 (LexState *ls, expdesc *v) {
+ /* explist1 -> expr { `,' expr } */
+ int n = 1; /* at least one expression */
+ expr(ls, v);
+ while (testnext(ls, ',')) {
+ luaK_exp2nextreg(ls->fs, v);
+ expr(ls, v);
+ n++;
+ }
+ return n;
+}
+
+
+static void funcargs (LexState *ls, expdesc *f) {
+ FuncState *fs = ls->fs;
+ expdesc args;
+ int base, nparams;
+ int line = ls->linenumber;
+ switch (ls->t.token) {
+ case '(': { /* funcargs -> `(' [ explist1 ] `)' */
+ if (line != ls->lastline)
+ luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
+ luaX_next(ls);
+ if (ls->t.token == ')') /* arg list is empty? */
+ args.k = VVOID;
+ else {
+ explist1(ls, &args);
+ luaK_setmultret(fs, &args);
+ }
+ check_match(ls, ')', '(', line);
+ break;
+ }
+ case '{': { /* funcargs -> constructor */
+ constructor(ls, &args);
+ break;
+ }
+ case TK_STRING: { /* funcargs -> STRING */
+ codestring(ls, &args, ls->t.seminfo.ts);
+ luaX_next(ls); /* must use `seminfo' before `next' */
+ break;
+ }
+ default: {
+ luaX_syntaxerror(ls, "function arguments expected");
+ return;
+ }
+ }
+ lua_assert(f->k == VNONRELOC);
+ base = f->u.s.info; /* base register for call */
+ if (hasmultret(args.k))
+ nparams = LUA_MULTRET; /* open call */
+ else {
+ if (args.k != VVOID)
+ luaK_exp2nextreg(fs, &args); /* close last argument */
+ nparams = fs->freereg - (base+1);
+ }
+ init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2));
+ luaK_fixline(fs, line);
+  fs->freereg = base+1;  /* call removes function and arguments and leaves
+                            (unless changed) one result */
+}
+
+
+
+
+/*
+** {======================================================================
+** Expression parsing
+** =======================================================================
+*/
+
+
+static void prefixexp (LexState *ls, expdesc *v) {
+ /* prefixexp -> NAME | '(' expr ')' */
+ switch (ls->t.token) {
+ case '(': {
+ int line = ls->linenumber;
+ luaX_next(ls);
+ expr(ls, v);
+ check_match(ls, ')', '(', line);
+ luaK_dischargevars(ls->fs, v);
+ return;
+ }
+ case TK_NAME: {
+ singlevar(ls, v);
+ return;
+ }
+ default: {
+ luaX_syntaxerror(ls, "unexpected symbol");
+ return;
+ }
+ }
+}
+
+
+static void primaryexp (LexState *ls, expdesc *v) {
+ /* primaryexp ->
+ prefixexp { `.' NAME | `[' exp `]' | `:' NAME funcargs | funcargs } */
+ FuncState *fs = ls->fs;
+ prefixexp(ls, v);
+ for (;;) {
+ switch (ls->t.token) {
+ case '.': { /* field */
+ field(ls, v);
+ break;
+ }
+ case '[': { /* `[' exp1 `]' */
+ expdesc key;
+ luaK_exp2anyreg(fs, v);
+ yindex(ls, &key);
+ luaK_indexed(fs, v, &key);
+ break;
+ }
+ case ':': { /* `:' NAME funcargs */
+ expdesc key;
+ luaX_next(ls);
+ checkname(ls, &key);
+ luaK_self(fs, v, &key);
+ funcargs(ls, v);
+ break;
+ }
+ case '(': case TK_STRING: case '{': { /* funcargs */
+ luaK_exp2nextreg(fs, v);
+ funcargs(ls, v);
+ break;
+ }
+ default: return;
+ }
+ }
+}
+
+
+static void simpleexp (LexState *ls, expdesc *v) {
+ /* simpleexp -> NUMBER | STRING | NIL | true | false | ... |
+ constructor | FUNCTION body | primaryexp */
+ switch (ls->t.token) {
+ case TK_NUMBER: {
+ init_exp(v, VKNUM, 0);
+ v->u.nval = ls->t.seminfo.r;
+ break;
+ }
+ case TK_STRING: {
+ codestring(ls, v, ls->t.seminfo.ts);
+ break;
+ }
+ case TK_NIL: {
+ init_exp(v, VNIL, 0);
+ break;
+ }
+ case TK_TRUE: {
+ init_exp(v, VTRUE, 0);
+ break;
+ }
+ case TK_FALSE: {
+ init_exp(v, VFALSE, 0);
+ break;
+ }
+ case TK_DOTS: { /* vararg */
+ FuncState *fs = ls->fs;
+ check_condition(ls, fs->f->is_vararg,
+ "cannot use " LUA_QL("...") " outside a vararg function");
+ fs->f->is_vararg &= ~VARARG_NEEDSARG; /* don't need 'arg' */
+ init_exp(v, VVARARG, luaK_codeABC(fs, OP_VARARG, 0, 1, 0));
+ break;
+ }
+ case '{': { /* constructor */
+ constructor(ls, v);
+ return;
+ }
+ case TK_FUNCTION: {
+ luaX_next(ls);
+ body(ls, v, 0, ls->linenumber);
+ return;
+ }
+ default: {
+ primaryexp(ls, v);
+ return;
+ }
+ }
+ luaX_next(ls);
+}
+
+
+static UnOpr getunopr (int op) {
+ switch (op) {
+ case TK_NOT: return OPR_NOT;
+ case '-': return OPR_MINUS;
+ case '#': return OPR_LEN;
+ default: return OPR_NOUNOPR;
+ }
+}
+
+
+static BinOpr getbinopr (int op) {
+ switch (op) {
+ case '+': return OPR_ADD;
+ case '-': return OPR_SUB;
+ case '*': return OPR_MUL;
+ case '/': return OPR_DIV;
+ case '%': return OPR_MOD;
+ case '^': return OPR_POW;
+ case TK_CONCAT: return OPR_CONCAT;
+ case TK_NE: return OPR_NE;
+ case TK_EQ: return OPR_EQ;
+ case '<': return OPR_LT;
+ case TK_LE: return OPR_LE;
+ case '>': return OPR_GT;
+ case TK_GE: return OPR_GE;
+ case TK_AND: return OPR_AND;
+ case TK_OR: return OPR_OR;
+ default: return OPR_NOBINOPR;
+ }
+}
+
+
+static const struct {
+ lu_byte left; /* left priority for each binary operator */
+ lu_byte right; /* right priority */
+} priority[] = { /* ORDER OPR */
+ {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, /* `+' `-' `/' `%' */
+ {10, 9}, {5, 4}, /* power and concat (right associative) */
+ {3, 3}, {3, 3}, /* equality and inequality */
+ {3, 3}, {3, 3}, {3, 3}, {3, 3}, /* order */
+ {2, 2}, {1, 1} /* logical (and/or) */
+};
+
+#define UNARY_PRIORITY 8 /* priority for unary operators */
+
+
+/*
+** subexpr -> (simpleexp | unop subexpr) { binop subexpr }
+** where `binop' is any binary operator with a priority higher than `limit'
+*/
+static BinOpr subexpr (LexState *ls, expdesc *v, unsigned int limit) {
+ BinOpr op;
+ UnOpr uop;
+ enterlevel(ls);
+ uop = getunopr(ls->t.token);
+ if (uop != OPR_NOUNOPR) {
+ luaX_next(ls);
+ subexpr(ls, v, UNARY_PRIORITY);
+ luaK_prefix(ls->fs, uop, v);
+ }
+ else simpleexp(ls, v);
+ /* expand while operators have priorities higher than `limit' */
+ op = getbinopr(ls->t.token);
+ while (op != OPR_NOBINOPR && priority[op].left > limit) {
+ expdesc v2;
+ BinOpr nextop;
+ luaX_next(ls);
+ luaK_infix(ls->fs, op, v);
+ /* read sub-expression with higher priority */
+ nextop = subexpr(ls, &v2, priority[op].right);
+ luaK_posfix(ls->fs, op, v, &v2);
+ op = nextop;
+ }
+ leavelevel(ls);
+ return op; /* return first untreated operator */
+}
+
+
+static void expr (LexState *ls, expdesc *v) {
+ subexpr(ls, v, 0);
+}
+
+/* }==================================================================== */
+
+
+
+/*
+** {======================================================================
+** Rules for Statements
+** =======================================================================
+*/
+
+
+static int block_follow (int token) {
+ switch (token) {
+ case TK_ELSE: case TK_ELSEIF: case TK_END:
+ case TK_UNTIL: case TK_EOS:
+ return 1;
+ default: return 0;
+ }
+}
+
+
+static void block (LexState *ls) {
+ /* block -> chunk */
+ FuncState *fs = ls->fs;
+ BlockCnt bl;
+ enterblock(fs, &bl, 0);
+ chunk(ls);
+ lua_assert(bl.breaklist == NO_JUMP);
+ leaveblock(fs);
+}
+
+
+/*
+** structure to chain all variables in the left-hand side of an
+** assignment
+*/
+struct LHS_assign {
+ struct LHS_assign *prev;
+ expdesc v; /* variable (global, local, upvalue, or indexed) */
+};
+
+
+/*
+** check whether, in an assignment to a local variable, the local variable
+** is needed in a previous assignment (to a table). If so, save original
+** local value in a safe place and use this safe copy in the previous
+** assignment.
+*/
+static void check_conflict (LexState *ls, struct LHS_assign *lh, expdesc *v) {
+ FuncState *fs = ls->fs;
+ int extra = fs->freereg; /* eventual position to save local variable */
+ int conflict = 0;
+ for (; lh; lh = lh->prev) {
+ if (lh->v.k == VINDEXED) {
+ if (lh->v.u.s.info == v->u.s.info) { /* conflict? */
+ conflict = 1;
+ lh->v.u.s.info = extra; /* previous assignment will use safe copy */
+ }
+ if (lh->v.u.s.aux == v->u.s.info) { /* conflict? */
+ conflict = 1;
+ lh->v.u.s.aux = extra; /* previous assignment will use safe copy */
+ }
+ }
+ }
+ if (conflict) {
+ luaK_codeABC(fs, OP_MOVE, fs->freereg, v->u.s.info, 0); /* make copy */
+ luaK_reserveregs(fs, 1);
+ }
+}
+
+
+static void assignment (LexState *ls, struct LHS_assign *lh, int nvars) {
+ expdesc e;
+ check_condition(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED,
+ "syntax error");
+ if (testnext(ls, ',')) { /* assignment -> `,' primaryexp assignment */
+ struct LHS_assign nv;
+ nv.prev = lh;
+ primaryexp(ls, &nv.v);
+ if (nv.v.k == VLOCAL)
+ check_conflict(ls, lh, &nv.v);
+ assignment(ls, &nv, nvars+1);
+ }
+ else { /* assignment -> `=' explist1 */
+ int nexps;
+ checknext(ls, '=');
+ nexps = explist1(ls, &e);
+ if (nexps != nvars) {
+ adjust_assign(ls, nvars, nexps, &e);
+ if (nexps > nvars)
+ ls->fs->freereg -= nexps - nvars; /* remove extra values */
+ }
+ else {
+ luaK_setoneret(ls->fs, &e); /* close last expression */
+ luaK_storevar(ls->fs, &lh->v, &e);
+ return; /* avoid default */
+ }
+ }
+ init_exp(&e, VNONRELOC, ls->fs->freereg-1); /* default assignment */
+ luaK_storevar(ls->fs, &lh->v, &e);
+}
+
+
+static int cond (LexState *ls) {
+ /* cond -> exp */
+ expdesc v;
+ expr(ls, &v); /* read condition */
+ if (v.k == VNIL) v.k = VFALSE; /* `falses' are all equal here */
+ luaK_goiftrue(ls->fs, &v);
+ return v.f;
+}
+
+
+static void breakstat (LexState *ls) {
+ FuncState *fs = ls->fs;
+ BlockCnt *bl = fs->bl;
+ int upval = 0;
+ while (bl && !bl->isbreakable) {
+ upval |= bl->upval;
+ bl = bl->previous;
+ }
+ if (!bl)
+ luaX_syntaxerror(ls, "no loop to break");
+ if (upval)
+ luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0);
+ luaK_concat(fs, &bl->breaklist, luaK_jump(fs));
+}
+
+
+static void whilestat (LexState *ls, int line) {
+ /* whilestat -> WHILE cond DO block END */
+ FuncState *fs = ls->fs;
+ int whileinit;
+ int condexit;
+ BlockCnt bl;
+ luaX_next(ls); /* skip WHILE */
+ whileinit = luaK_getlabel(fs);
+ condexit = cond(ls);
+ enterblock(fs, &bl, 1);
+ checknext(ls, TK_DO);
+ block(ls);
+ luaK_patchlist(fs, luaK_jump(fs), whileinit);
+ check_match(ls, TK_END, TK_WHILE, line);
+ leaveblock(fs);
+ luaK_patchtohere(fs, condexit); /* false conditions finish the loop */
+}
+
+
+static void repeatstat (LexState *ls, int line) {
+ /* repeatstat -> REPEAT block UNTIL cond */
+ int condexit;
+ FuncState *fs = ls->fs;
+ int repeat_init = luaK_getlabel(fs);
+ BlockCnt bl1, bl2;
+ enterblock(fs, &bl1, 1); /* loop block */
+ enterblock(fs, &bl2, 0); /* scope block */
+ luaX_next(ls); /* skip REPEAT */
+ chunk(ls);
+ check_match(ls, TK_UNTIL, TK_REPEAT, line);
+ condexit = cond(ls); /* read condition (inside scope block) */
+ if (!bl2.upval) { /* no upvalues? */
+ leaveblock(fs); /* finish scope */
+ luaK_patchlist(ls->fs, condexit, repeat_init); /* close the loop */
+ }
+ else { /* complete semantics when there are upvalues */
+ breakstat(ls); /* if condition then break */
+ luaK_patchtohere(ls->fs, condexit); /* else... */
+ leaveblock(fs); /* finish scope... */
+ luaK_patchlist(ls->fs, luaK_jump(fs), repeat_init); /* and repeat */
+ }
+ leaveblock(fs); /* finish loop */
+}
+
+
+static int exp1 (LexState *ls) {
+ expdesc e;
+ int k;
+ expr(ls, &e);
+ k = e.k;
+ luaK_exp2nextreg(ls->fs, &e);
+ return k;
+}
+
+
+static void forbody (LexState *ls, int base, int line, int nvars, int isnum) {
+ /* forbody -> DO block */
+ BlockCnt bl;
+ FuncState *fs = ls->fs;
+ int prep, endfor;
+ adjustlocalvars(ls, 3); /* control variables */
+ checknext(ls, TK_DO);
+ prep = isnum ? luaK_codeAsBx(fs, OP_FORPREP, base, NO_JUMP) : luaK_jump(fs);
+ enterblock(fs, &bl, 0); /* scope for declared variables */
+ adjustlocalvars(ls, nvars);
+ luaK_reserveregs(fs, nvars);
+ block(ls);
+ leaveblock(fs); /* end of scope for declared variables */
+ luaK_patchtohere(fs, prep);
+ endfor = (isnum) ? luaK_codeAsBx(fs, OP_FORLOOP, base, NO_JUMP) :
+ luaK_codeABC(fs, OP_TFORLOOP, base, 0, nvars);
+ luaK_fixline(fs, line); /* pretend that `OP_FOR' starts the loop */
+ luaK_patchlist(fs, (isnum ? endfor : luaK_jump(fs)), prep + 1);
+}
+
+
+static void fornum (LexState *ls, TString *varname, int line) {
+ /* fornum -> NAME = exp1,exp1[,exp1] forbody */
+ FuncState *fs = ls->fs;
+ int base = fs->freereg;
+ new_localvarliteral(ls, "(for index)", 0);
+ new_localvarliteral(ls, "(for limit)", 1);
+ new_localvarliteral(ls, "(for step)", 2);
+ new_localvar(ls, varname, 3);
+ checknext(ls, '=');
+ exp1(ls); /* initial value */
+ checknext(ls, ',');
+ exp1(ls); /* limit */
+ if (testnext(ls, ','))
+ exp1(ls); /* optional step */
+ else { /* default step = 1 */
+ luaK_codeABx(fs, OP_LOADK, fs->freereg, luaK_numberK(fs, 1));
+ luaK_reserveregs(fs, 1);
+ }
+ forbody(ls, base, line, 1, 1);
+}
+
+
+static void forlist (LexState *ls, TString *indexname) {
+ /* forlist -> NAME {,NAME} IN explist1 forbody */
+ FuncState *fs = ls->fs;
+ expdesc e;
+ int nvars = 0;
+ int line;
+ int base = fs->freereg;
+ /* create control variables */
+ new_localvarliteral(ls, "(for generator)", nvars++);
+ new_localvarliteral(ls, "(for state)", nvars++);
+ new_localvarliteral(ls, "(for control)", nvars++);
+ /* create declared variables */
+ new_localvar(ls, indexname, nvars++);
+ while (testnext(ls, ','))
+ new_localvar(ls, str_checkname(ls), nvars++);
+ checknext(ls, TK_IN);
+ line = ls->linenumber;
+ adjust_assign(ls, 3, explist1(ls, &e), &e);
+ luaK_checkstack(fs, 3); /* extra space to call generator */
+ forbody(ls, base, line, nvars - 3, 0);
+}
+
+
+static void forstat (LexState *ls, int line) {
+ /* forstat -> FOR (fornum | forlist) END */
+ FuncState *fs = ls->fs;
+ TString *varname;
+ BlockCnt bl;
+ enterblock(fs, &bl, 1); /* scope for loop and control variables */
+ luaX_next(ls); /* skip `for' */
+ varname = str_checkname(ls); /* first variable name */
+ switch (ls->t.token) {
+ case '=': fornum(ls, varname, line); break;
+ case ',': case TK_IN: forlist(ls, varname); break;
+ default: luaX_syntaxerror(ls, LUA_QL("=") " or " LUA_QL("in") " expected");
+ }
+ check_match(ls, TK_END, TK_FOR, line);
+ leaveblock(fs); /* loop scope (`break' jumps to this point) */
+}
+
+
+static int test_then_block (LexState *ls) {
+ /* test_then_block -> [IF | ELSEIF] cond THEN block */
+ int condexit;
+ luaX_next(ls); /* skip IF or ELSEIF */
+ condexit = cond(ls);
+ checknext(ls, TK_THEN);
+ block(ls); /* `then' part */
+ return condexit;
+}
+
+
+static void ifstat (LexState *ls, int line) {
+ /* ifstat -> IF cond THEN block {ELSEIF cond THEN block} [ELSE block] END */
+ FuncState *fs = ls->fs;
+ int flist;
+ int escapelist = NO_JUMP;
+ flist = test_then_block(ls); /* IF cond THEN block */
+ while (ls->t.token == TK_ELSEIF) {
+ luaK_concat(fs, &escapelist, luaK_jump(fs));
+ luaK_patchtohere(fs, flist);
+ flist = test_then_block(ls); /* ELSEIF cond THEN block */
+ }
+ if (ls->t.token == TK_ELSE) {
+ luaK_concat(fs, &escapelist, luaK_jump(fs));
+ luaK_patchtohere(fs, flist);
+ luaX_next(ls); /* skip ELSE (after patch, for correct line info) */
+ block(ls); /* `else' part */
+ }
+ else
+ luaK_concat(fs, &escapelist, flist);
+ luaK_patchtohere(fs, escapelist);
+ check_match(ls, TK_END, TK_IF, line);
+}
+
+
+static void localfunc (LexState *ls) {
+ expdesc v, b;
+ FuncState *fs = ls->fs;
+ new_localvar(ls, str_checkname(ls), 0);
+ init_exp(&v, VLOCAL, fs->freereg);
+ luaK_reserveregs(fs, 1);
+ adjustlocalvars(ls, 1);
+ body(ls, &b, 0, ls->linenumber);
+ luaK_storevar(fs, &v, &b);
+ /* debug information will only see the variable after this point! */
+ getlocvar(fs, fs->nactvar - 1).startpc = fs->pc;
+}
+
+
+static void localstat (LexState *ls) {
+ /* stat -> LOCAL NAME {`,' NAME} [`=' explist1] */
+ int nvars = 0;
+ int nexps;
+ expdesc e;
+ do {
+ new_localvar(ls, str_checkname(ls), nvars++);
+ } while (testnext(ls, ','));
+ if (testnext(ls, '='))
+ nexps = explist1(ls, &e);
+ else {
+ e.k = VVOID;
+ nexps = 0;
+ }
+ adjust_assign(ls, nvars, nexps, &e);
+ adjustlocalvars(ls, nvars);
+}
+
+
+static int funcname (LexState *ls, expdesc *v) {
+ /* funcname -> NAME {field} [`:' NAME] */
+ int needself = 0;
+ singlevar(ls, v);
+ while (ls->t.token == '.')
+ field(ls, v);
+ if (ls->t.token == ':') {
+ needself = 1;
+ field(ls, v);
+ }
+ return needself;
+}
+
+
+static void funcstat (LexState *ls, int line) {
+ /* funcstat -> FUNCTION funcname body */
+ int needself;
+ expdesc v, b;
+ luaX_next(ls); /* skip FUNCTION */
+ needself = funcname(ls, &v);
+ body(ls, &b, needself, line);
+ luaK_storevar(ls->fs, &v, &b);
+ luaK_fixline(ls->fs, line); /* definition `happens' in the first line */
+}
+
+
+static void exprstat (LexState *ls) {
+ /* stat -> func | assignment */
+ FuncState *fs = ls->fs;
+ struct LHS_assign v;
+ primaryexp(ls, &v.v);
+ if (v.v.k == VCALL) /* stat -> func */
+ SETARG_C(getcode(fs, &v.v), 1); /* call statement uses no results */
+ else { /* stat -> assignment */
+ v.prev = NULL;
+ assignment(ls, &v, 1);
+ }
+}
+
+
+static void retstat (LexState *ls) {
+ /* stat -> RETURN explist */
+ FuncState *fs = ls->fs;
+ expdesc e;
+ int first, nret; /* registers with returned values */
+ luaX_next(ls); /* skip RETURN */
+ if (block_follow(ls->t.token) || ls->t.token == ';')
+ first = nret = 0; /* return no values */
+ else {
+ nret = explist1(ls, &e); /* optional return values */
+ if (hasmultret(e.k)) {
+ luaK_setmultret(fs, &e);
+ if (e.k == VCALL && nret == 1) { /* tail call? */
+ SET_OPCODE(getcode(fs,&e), OP_TAILCALL);
+ lua_assert(GETARG_A(getcode(fs,&e)) == fs->nactvar);
+ }
+ first = fs->nactvar;
+ nret = LUA_MULTRET; /* return all values */
+ }
+ else {
+ if (nret == 1) /* only one single value? */
+ first = luaK_exp2anyreg(fs, &e);
+ else {
+ luaK_exp2nextreg(fs, &e); /* values must go to the `stack' */
+ first = fs->nactvar; /* return all `active' values */
+ lua_assert(nret == fs->freereg - first);
+ }
+ }
+ }
+ luaK_ret(fs, first, nret);
+}
+
+
+static int statement (LexState *ls) {
+ int line = ls->linenumber; /* may be needed for error messages */
+ switch (ls->t.token) {
+ case TK_IF: { /* stat -> ifstat */
+ ifstat(ls, line);
+ return 0;
+ }
+ case TK_WHILE: { /* stat -> whilestat */
+ whilestat(ls, line);
+ return 0;
+ }
+ case TK_DO: { /* stat -> DO block END */
+ luaX_next(ls); /* skip DO */
+ block(ls);
+ check_match(ls, TK_END, TK_DO, line);
+ return 0;
+ }
+ case TK_FOR: { /* stat -> forstat */
+ forstat(ls, line);
+ return 0;
+ }
+ case TK_REPEAT: { /* stat -> repeatstat */
+ repeatstat(ls, line);
+ return 0;
+ }
+ case TK_FUNCTION: {
+ funcstat(ls, line); /* stat -> funcstat */
+ return 0;
+ }
+ case TK_LOCAL: { /* stat -> localstat */
+ luaX_next(ls); /* skip LOCAL */
+ if (testnext(ls, TK_FUNCTION)) /* local function? */
+ localfunc(ls);
+ else
+ localstat(ls);
+ return 0;
+ }
+ case TK_RETURN: { /* stat -> retstat */
+ retstat(ls);
+ return 1; /* must be last statement */
+ }
+ case TK_BREAK: { /* stat -> breakstat */
+ luaX_next(ls); /* skip BREAK */
+ breakstat(ls);
+ return 1; /* must be last statement */
+ }
+ default: {
+ exprstat(ls);
+ return 0; /* to avoid warnings */
+ }
+ }
+}
+
+
+static void chunk (LexState *ls) {
+ /* chunk -> { stat [`;'] } */
+ int islast = 0;
+ enterlevel(ls);
+ while (!islast && !block_follow(ls->t.token)) {
+ islast = statement(ls);
+ testnext(ls, ';');
+ lua_assert(ls->fs->f->maxstacksize >= ls->fs->freereg &&
+ ls->fs->freereg >= ls->fs->nactvar);
+ ls->fs->freereg = ls->fs->nactvar; /* free registers */
+ }
+ leavelevel(ls);
+}
+
+/* }====================================================================== */
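The parser is driven indirectly: a host never calls `statement' or `chunk' itself, it loads source through the C API and that path ends in luaY_parser (declared in lparser.h below). A minimal host-side sketch, not part of the patch, assuming a standard Lua 5.1 build with the auxiliary libraries:

    #include <stdio.h>
    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int main (void) {
      lua_State *L = luaL_newstate();   /* creates the state (see lstate.c below) */
      luaL_openlibs(L);
      /* luaL_loadstring runs the parser above to compile the chunk */
      if (luaL_loadstring(L, "local x = 1 return x + 1") != 0 ||
          lua_pcall(L, 0, 1, 0) != 0) {
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
        lua_close(L);
        return 1;
      }
      printf("%d\n", (int)lua_tointeger(L, -1));  /* prints 2 */
      lua_close(L);
      return 0;
    }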
diff --git a/deps/lua/src/lparser.h b/deps/lua/src/lparser.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5e6e81d0d4bba0728a00cf6a82a388e339dc8b2
--- /dev/null
+++ b/deps/lua/src/lparser.h
@@ -0,0 +1,83 @@
+/*
+** $Id: lparser.h,v 1.56 2005/10/03 14:02:40 roberto Exp $
+** Lua Parser
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lparser_h
+#define lparser_h
+
+#include "llimits.h"
+#include "lobject.h"
+#include "ltable.h"
+#include "lzio.h"
+
+
+/*
+** Expression descriptor
+*/
+
+typedef enum {
+ VVOID, /* no value */
+ VNIL,
+ VTRUE,
+ VFALSE,
+ VK, /* info = index of constant in `k' */
+ VKNUM, /* nval = numerical value */
+ VLOCAL, /* info = local register */
+ VUPVAL, /* info = index of upvalue in `upvalues' */
+ VGLOBAL, /* info = index of table; aux = index of global name in `k' */
+ VINDEXED, /* info = table register; aux = index register (or `k') */
+ VJMP, /* info = instruction pc */
+ VRELOCABLE, /* info = instruction pc */
+ VNONRELOC, /* info = result register */
+ VCALL, /* info = instruction pc */
+ VVARARG /* info = instruction pc */
+} expkind;
+
+typedef struct expdesc {
+ expkind k;
+ union {
+ struct { int info, aux; } s;
+ lua_Number nval;
+ } u;
+ int t; /* patch list of `exit when true' */
+ int f; /* patch list of `exit when false' */
+} expdesc;
+
+
+typedef struct upvaldesc {
+ lu_byte k;
+ lu_byte info;
+} upvaldesc;
+
+
+struct BlockCnt; /* defined in lparser.c */
+
+
+/* state needed to generate code for a given function */
+typedef struct FuncState {
+ Proto *f; /* current function header */
+ Table *h; /* table to find (and reuse) elements in `k' */
+ struct FuncState *prev; /* enclosing function */
+ struct LexState *ls; /* lexical state */
+ struct lua_State *L; /* copy of the Lua state */
+ struct BlockCnt *bl; /* chain of current blocks */
+ int pc; /* next position to code (equivalent to `ncode') */
+ int lasttarget; /* `pc' of last `jump target' */
+ int jpc; /* list of pending jumps to `pc' */
+ int freereg; /* first free register */
+ int nk; /* number of elements in `k' */
+ int np; /* number of elements in `p' */
+ short nlocvars; /* number of elements in `locvars' */
+ lu_byte nactvar; /* number of active local variables */
+ upvaldesc upvalues[LUAI_MAXUPVALUES]; /* upvalues */
+ unsigned short actvar[LUAI_MAXVARS]; /* declared-variable stack */
+} FuncState;
+
+
+LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff,
+ const char *name);
+
+
+#endif
diff --git a/deps/lua/src/lstate.c b/deps/lua/src/lstate.c
new file mode 100644
index 0000000000000000000000000000000000000000..77e93fbdfa056a05c5de3b9ad09dee4e3e2e46f7
--- /dev/null
+++ b/deps/lua/src/lstate.c
@@ -0,0 +1,214 @@
+/*
+** $Id: lstate.c,v 2.35 2005/10/06 20:46:25 roberto Exp $
+** Global State
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stddef.h>
+
+#define lstate_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lgc.h"
+#include "llex.h"
+#include "lmem.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+
+
+#define state_size(x) (sizeof(x) + LUAI_EXTRASPACE)
+#define fromstate(l) (cast(lu_byte *, (l)) - LUAI_EXTRASPACE)
+#define tostate(l) (cast(lua_State *, cast(lu_byte *, l) + LUAI_EXTRASPACE))
+
+
+/*
+** Main thread combines a thread state and the global state
+*/
+typedef struct LG {
+ lua_State l;
+ global_State g;
+} LG;
+
+
+
+static void stack_init (lua_State *L1, lua_State *L) {
+ /* initialize CallInfo array */
+ L1->base_ci = luaM_newvector(L, BASIC_CI_SIZE, CallInfo);
+ L1->ci = L1->base_ci;
+ L1->size_ci = BASIC_CI_SIZE;
+ L1->end_ci = L1->base_ci + L1->size_ci - 1;
+ /* initialize stack array */
+ L1->stack = luaM_newvector(L, BASIC_STACK_SIZE + EXTRA_STACK, TValue);
+ L1->stacksize = BASIC_STACK_SIZE + EXTRA_STACK;
+ L1->top = L1->stack;
+ L1->stack_last = L1->stack+(L1->stacksize - EXTRA_STACK)-1;
+ /* initialize first ci */
+ L1->ci->func = L1->top;
+ setnilvalue(L1->top++); /* `function' entry for this `ci' */
+ L1->base = L1->ci->base = L1->top;
+ L1->ci->top = L1->top + LUA_MINSTACK;
+}
+
+
+static void freestack (lua_State *L, lua_State *L1) {
+ luaM_freearray(L, L1->base_ci, L1->size_ci, CallInfo);
+ luaM_freearray(L, L1->stack, L1->stacksize, TValue);
+}
+
+
+/*
+** open parts that may cause memory-allocation errors
+*/
+static void f_luaopen (lua_State *L, void *ud) {
+ global_State *g = G(L);
+ UNUSED(ud);
+ stack_init(L, L); /* init stack */
+ sethvalue(L, gt(L), luaH_new(L, 0, 2)); /* table of globals */
+ sethvalue(L, registry(L), luaH_new(L, 0, 2)); /* registry */
+ luaS_resize(L, MINSTRTABSIZE); /* initial size of string table */
+ luaT_init(L);
+ luaX_init(L);
+ luaS_fix(luaS_newliteral(L, MEMERRMSG));
+ g->GCthreshold = 4*g->totalbytes;
+}
+
+
+static void preinit_state (lua_State *L, global_State *g) {
+ G(L) = g;
+ L->stack = NULL;
+ L->stacksize = 0;
+ L->errorJmp = NULL;
+ L->hook = NULL;
+ L->hookmask = 0;
+ L->basehookcount = 0;
+ L->allowhook = 1;
+ resethookcount(L);
+ L->openupval = NULL;
+ L->size_ci = 0;
+ L->nCcalls = 0;
+ L->status = 0;
+ L->base_ci = L->ci = NULL;
+ L->savedpc = NULL;
+ L->errfunc = 0;
+ setnilvalue(gt(L));
+}
+
+
+static void close_state (lua_State *L) {
+ global_State *g = G(L);
+ luaF_close(L, L->stack); /* close all upvalues for this thread */
+ luaC_freeall(L); /* collect all objects */
+ lua_assert(g->rootgc == obj2gco(L));
+ lua_assert(g->strt.nuse == 0);
+ luaM_freearray(L, G(L)->strt.hash, G(L)->strt.size, TString *);
+ luaZ_freebuffer(L, &g->buff);
+ freestack(L, L);
+ lua_assert(g->totalbytes == sizeof(LG));
+ (*g->frealloc)(g->ud, fromstate(L), state_size(LG), 0);
+}
+
+
+lua_State *luaE_newthread (lua_State *L) {
+ lua_State *L1 = tostate(luaM_malloc(L, state_size(lua_State)));
+ luaC_link(L, obj2gco(L1), LUA_TTHREAD);
+ preinit_state(L1, G(L));
+ stack_init(L1, L); /* init stack */
+ setobj2n(L, gt(L1), gt(L)); /* share table of globals */
+ L1->hookmask = L->hookmask;
+ L1->basehookcount = L->basehookcount;
+ L1->hook = L->hook;
+ resethookcount(L1);
+ lua_assert(iswhite(obj2gco(L1)));
+ return L1;
+}
+
+
+void luaE_freethread (lua_State *L, lua_State *L1) {
+ luaF_close(L1, L1->stack); /* close all upvalues for this thread */
+ lua_assert(L1->openupval == NULL);
+ luai_userstatefree(L1);
+ freestack(L, L1);
+ luaM_freemem(L, fromstate(L1), state_size(lua_State));
+}
+
+
+LUA_API lua_State *lua_newstate (lua_Alloc f, void *ud) {
+ int i;
+ lua_State *L;
+ global_State *g;
+ void *l = (*f)(ud, NULL, 0, state_size(LG));
+ if (l == NULL) return NULL;
+ L = tostate(l);
+ g = &((LG *)L)->g;
+ L->next = NULL;
+ L->tt = LUA_TTHREAD;
+ g->currentwhite = bit2mask(WHITE0BIT, FIXEDBIT);
+ L->marked = luaC_white(g);
+ set2bits(L->marked, FIXEDBIT, SFIXEDBIT);
+ preinit_state(L, g);
+ g->frealloc = f;
+ g->ud = ud;
+ g->mainthread = L;
+ g->uvhead.u.l.prev = &g->uvhead;
+ g->uvhead.u.l.next = &g->uvhead;
+ g->GCthreshold = 0; /* mark it as unfinished state */
+ g->strt.size = 0;
+ g->strt.nuse = 0;
+ g->strt.hash = NULL;
+ setnilvalue(registry(L));
+ luaZ_initbuffer(L, &g->buff);
+ g->panic = NULL;
+ g->gcstate = GCSpause;
+ g->rootgc = obj2gco(L);
+ g->sweepstrgc = 0;
+ g->sweepgc = &g->rootgc;
+ g->gray = NULL;
+ g->grayagain = NULL;
+ g->weak = NULL;
+ g->tmudata = NULL;
+ g->totalbytes = sizeof(LG);
+ g->gcpause = LUAI_GCPAUSE;
+ g->gcstepmul = LUAI_GCMUL;
+ g->gcdept = 0;
+ for (i=0; i<NUM_TAGS; i++) g->mt[i] = NULL;
+ if (luaD_rawrunprotected(L, f_luaopen, NULL) != 0) {
+ /* memory allocation error: free partial state */
+ close_state(L);
+ L = NULL;
+ }
+ else
+ luai_userstateopen(L);
+ return L;
+}
+
+
+static void callallgcTM (lua_State *L, void *ud) {
+ UNUSED(ud);
+ luaC_callGCTM(L); /* call GC metamethods for all udata */
+}
+
+
+LUA_API void lua_close (lua_State *L) {
+ L = G(L)->mainthread; /* only the main thread can be closed */
+ luai_userstateclose(L);
+ lua_lock(L);
+ luaF_close(L, L->stack); /* close all upvalues for this thread */
+ luaC_separateudata(L, 1); /* separate udata that have GC metamethods */
+ L->errfunc = 0; /* no error function during GC metamethods */
+ do { /* repeat until no more errors */
+ L->ci = L->base_ci;
+ L->base = L->top = L->ci->base;
+ L->nCcalls = 0;
+ } while (luaD_rawrunprotected(L, callallgcTM, NULL) != 0);
+ lua_assert(G(L)->tmudata == NULL);
+ close_state(L);
+}
+
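lua_newstate receives every allocation through the single lua_Alloc callback passed to it, so an embedder can meter or cap memory in one place. A realloc-based allocator is the usual choice; the sketch below is illustrative (the names l_alloc and open_state are placeholders, not part of the patch), and lauxlib's luaL_newstate wraps essentially the same allocator:

    #include <stdlib.h>
    #include "lua.h"

    /* lua_Alloc contract: nsize == 0 means free; otherwise (re)allocate to nsize bytes */
    static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) {
      (void)ud; (void)osize;   /* unused in this simple allocator */
      if (nsize == 0) {
        free(ptr);
        return NULL;
      }
      return realloc(ptr, nsize);
    }

    lua_State *open_state (void) {
      /* returns NULL if the very first allocation fails, as lua_newstate does */
      return lua_newstate(l_alloc, NULL);
    }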
diff --git a/deps/lua/src/lstate.h b/deps/lua/src/lstate.h
new file mode 100644
index 0000000000000000000000000000000000000000..d296a4cab99e1cb1ec5f09bc0cd1bfc74e1cd5cc
--- /dev/null
+++ b/deps/lua/src/lstate.h
@@ -0,0 +1,168 @@
+/*
+** $Id: lstate.h,v 2.24 2006/02/06 18:27:59 roberto Exp $
+** Global State
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lstate_h
+#define lstate_h
+
+#include "lua.h"
+
+#include "lobject.h"
+#include "ltm.h"
+#include "lzio.h"
+
+
+
+struct lua_longjmp; /* defined in ldo.c */
+
+
+/* table of globals */
+#define gt(L) (&L->l_gt)
+
+/* registry */
+#define registry(L) (&G(L)->l_registry)
+
+
+/* extra stack space to handle TM calls and some other extras */
+#define EXTRA_STACK 5
+
+
+#define BASIC_CI_SIZE 8
+
+#define BASIC_STACK_SIZE (2*LUA_MINSTACK)
+
+
+
+typedef struct stringtable {
+ GCObject **hash;
+ lu_int32 nuse; /* number of elements */
+ int size;
+} stringtable;
+
+
+/*
+** information about a call
+*/
+typedef struct CallInfo {
+ StkId base; /* base for this function */
+ StkId func; /* function index in the stack */
+ StkId top; /* top for this function */
+ const Instruction *savedpc;
+ int nresults; /* expected number of results from this function */
+ int tailcalls; /* number of tail calls lost under this entry */
+} CallInfo;
+
+
+
+#define curr_func(L) (clvalue(L->ci->func))
+#define ci_func(ci) (clvalue((ci)->func))
+#define f_isLua(ci) (!ci_func(ci)->c.isC)
+#define isLua(ci) (ttisfunction((ci)->func) && f_isLua(ci))
+
+
+/*
+** `global state', shared by all threads of this state
+*/
+typedef struct global_State {
+ stringtable strt; /* hash table for strings */
+ lua_Alloc frealloc; /* function to reallocate memory */
+ void *ud; /* auxiliary data to `frealloc' */
+ lu_byte currentwhite;
+ lu_byte gcstate; /* state of garbage collector */
+ int sweepstrgc; /* position of sweep in `strt' */
+ GCObject *rootgc; /* list of all collectable objects */
+ GCObject **sweepgc; /* position of sweep in `rootgc' */
+ GCObject *gray; /* list of gray objects */
+ GCObject *grayagain; /* list of objects to be traversed atomically */
+ GCObject *weak; /* list of weak tables (to be cleared) */
+ GCObject *tmudata; /* last element of list of userdata to be GC */
+ Mbuffer buff; /* temporary buffer for string concatenation */
+ lu_mem GCthreshold;
+ lu_mem totalbytes; /* number of bytes currently allocated */
+ lu_mem estimate; /* an estimate of number of bytes actually in use */
+ lu_mem gcdept; /* how much GC is `behind schedule' */
+ int gcpause; /* size of pause between successive GCs */
+ int gcstepmul; /* GC `granularity' */
+ lua_CFunction panic; /* to be called in unprotected errors */
+ TValue l_registry;
+ struct lua_State *mainthread;
+ UpVal uvhead; /* head of double-linked list of all open upvalues */
+ struct Table *mt[NUM_TAGS]; /* metatables for basic types */
+ TString *tmname[TM_N]; /* array with tag-method names */
+} global_State;
+
+
+/*
+** `per thread' state
+*/
+struct lua_State {
+ CommonHeader;
+ lu_byte status;
+ StkId top; /* first free slot in the stack */
+ StkId base; /* base of current function */
+ global_State *l_G;
+ CallInfo *ci; /* call info for current function */
+ const Instruction *savedpc; /* `savedpc' of current function */
+ StkId stack_last; /* last free slot in the stack */
+ StkId stack; /* stack base */
+ CallInfo *end_ci; /* points after end of ci array*/
+ CallInfo *base_ci; /* array of CallInfo's */
+ int stacksize;
+ int size_ci; /* size of array `base_ci' */
+ unsigned short nCcalls; /* number of nested C calls */
+ lu_byte hookmask;
+ lu_byte allowhook;
+ int basehookcount;
+ int hookcount;
+ lua_Hook hook;
+ TValue l_gt; /* table of globals */
+ TValue env; /* temporary place for environments */
+ GCObject *openupval; /* list of open upvalues in this stack */
+ GCObject *gclist;
+ struct lua_longjmp *errorJmp; /* current error recover point */
+ ptrdiff_t errfunc; /* current error handling function (stack index) */
+};
+
+
+#define G(L) (L->l_G)
+
+
+/*
+** Union of all collectable objects
+*/
+union GCObject {
+ GCheader gch;
+ union TString ts;
+ union Udata u;
+ union Closure cl;
+ struct Table h;
+ struct Proto p;
+ struct UpVal uv;
+ struct lua_State th; /* thread */
+};
+
+
+/* macros to convert a GCObject into a specific value */
+#define rawgco2ts(o) check_exp((o)->gch.tt == LUA_TSTRING, &((o)->ts))
+#define gco2ts(o) (&rawgco2ts(o)->tsv)
+#define rawgco2u(o) check_exp((o)->gch.tt == LUA_TUSERDATA, &((o)->u))
+#define gco2u(o) (&rawgco2u(o)->uv)
+#define gco2cl(o) check_exp((o)->gch.tt == LUA_TFUNCTION, &((o)->cl))
+#define gco2h(o) check_exp((o)->gch.tt == LUA_TTABLE, &((o)->h))
+#define gco2p(o) check_exp((o)->gch.tt == LUA_TPROTO, &((o)->p))
+#define gco2uv(o) check_exp((o)->gch.tt == LUA_TUPVAL, &((o)->uv))
+#define ngcotouv(o) \
+ check_exp((o) == NULL || (o)->gch.tt == LUA_TUPVAL, &((o)->uv))
+#define gco2th(o) check_exp((o)->gch.tt == LUA_TTHREAD, &((o)->th))
+
+/* macro to convert any Lua object into a GCObject */
+#define obj2gco(v) (cast(GCObject *, (v)))
+
+
+LUAI_FUNC lua_State *luaE_newthread (lua_State *L);
+LUAI_FUNC void luaE_freethread (lua_State *L, lua_State *L1);
+
+#endif
+
diff --git a/deps/lua/src/lstring.c b/deps/lua/src/lstring.c
new file mode 100644
index 0000000000000000000000000000000000000000..4319930c96d07e61fe98b697b87c538280cf38c9
--- /dev/null
+++ b/deps/lua/src/lstring.c
@@ -0,0 +1,111 @@
+/*
+** $Id: lstring.c,v 2.8 2005/12/22 16:19:56 roberto Exp $
+** String table (keeps all strings handled by Lua)
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lstring_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lmem.h"
+#include "lobject.h"
+#include "lstate.h"
+#include "lstring.h"
+
+
+
+void luaS_resize (lua_State *L, int newsize) {
+ GCObject **newhash;
+ stringtable *tb;
+ int i;
+ if (G(L)->gcstate == GCSsweepstring)
+ return; /* cannot resize during GC traverse */
+ newhash = luaM_newvector(L, newsize, GCObject *);
+ tb = &G(L)->strt;
+ for (i=0; i<tb->size; i++) {
+ GCObject *p = tb->hash[i];
+ while (p) { /* for each node in the list */
+ GCObject *next = p->gch.next; /* save next */
+ unsigned int h = gco2ts(p)->hash;
+ int h1 = lmod(h, newsize); /* new position */
+ lua_assert(cast_int(h%newsize) == lmod(h, newsize));
+ p->gch.next = newhash[h1]; /* chain it */
+ newhash[h1] = p;
+ p = next;
+ }
+ }
+ luaM_freearray(L, tb->hash, tb->size, TString *);
+ tb->size = newsize;
+ tb->hash = newhash;
+}
+
+
+static TString *newlstr (lua_State *L, const char *str, size_t l,
+ unsigned int h) {
+ TString *ts;
+ stringtable *tb;
+ if (l+1 > (MAX_SIZET - sizeof(TString))/sizeof(char))
+ luaM_toobig(L);
+ ts = cast(TString *, luaM_malloc(L, (l+1)*sizeof(char)+sizeof(TString)));
+ ts->tsv.len = l;
+ ts->tsv.hash = h;
+ ts->tsv.marked = luaC_white(G(L));
+ ts->tsv.tt = LUA_TSTRING;
+ ts->tsv.reserved = 0;
+ memcpy(ts+1, str, l*sizeof(char));
+ ((char *)(ts+1))[l] = '\0'; /* ending 0 */
+ tb = &G(L)->strt;
+ h = lmod(h, tb->size);
+ ts->tsv.next = tb->hash[h]; /* chain new entry */
+ tb->hash[h] = obj2gco(ts);
+ tb->nuse++;
+ if (tb->nuse > cast(lu_int32, tb->size) && tb->size <= MAX_INT/2)
+ luaS_resize(L, tb->size*2); /* too crowded */
+ return ts;
+}
+
+
+TString *luaS_newlstr (lua_State *L, const char *str, size_t l) {
+ GCObject *o;
+ unsigned int h = cast(unsigned int, l); /* seed */
+ size_t step = (l>>5)+1; /* if string is too long, don't hash all its chars */
+ size_t l1;
+ for (l1=l; l1>=step; l1-=step) /* compute hash */
+ h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1]));
+ for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)];
+ o != NULL;
+ o = o->gch.next) {
+ TString *ts = rawgco2ts(o);
+ if (ts->tsv.len == l && (memcmp(str, getstr(ts), l) == 0)) {
+ /* string may be dead */
+ if (isdead(G(L), o)) changewhite(o);
+ return ts;
+ }
+ }
+ return newlstr(L, str, l, h); /* not found */
+}
+
+
+Udata *luaS_newudata (lua_State *L, size_t s, Table *e) {
+ Udata *u;
+ if (s > MAX_SIZET - sizeof(Udata))
+ luaM_toobig(L);
+ u = cast(Udata *, luaM_malloc(L, s + sizeof(Udata)));
+ u->uv.marked = luaC_white(G(L)); /* is not finalized */
+ u->uv.tt = LUA_TUSERDATA;
+ u->uv.len = s;
+ u->uv.metatable = NULL;
+ u->uv.env = e;
+ /* chain it on udata list (after main thread) */
+ u->uv.next = G(L)->mainthread->next;
+ G(L)->mainthread->next = obj2gco(u);
+ return u;
+}
+
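luaS_newlstr interns every string: the hash seeds on the length, samples at most about 32 characters (step = (l>>5)+1) so long strings stay cheap to hash, and an equal string already present in the chain is simply revived and returned. For clarity, the same hash written as a standalone function (lua51_strhash is a hypothetical name, not part of the patch):

    #include <stddef.h>

    static unsigned int lua51_strhash (const char *str, size_t l) {
      unsigned int h = (unsigned int)l;   /* seed */
      size_t step = (l >> 5) + 1;         /* if string is too long, don't hash all its chars */
      size_t l1;
      for (l1 = l; l1 >= step; l1 -= step)
        h = h ^ ((h << 5) + (h >> 2) + (unsigned char)str[l1 - 1]);
      return h;
    }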
diff --git a/deps/lua/src/lstring.h b/deps/lua/src/lstring.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d2e91ea13a31bcfbdbee4c63dc17d1e99e9f8e4
--- /dev/null
+++ b/deps/lua/src/lstring.h
@@ -0,0 +1,31 @@
+/*
+** $Id: lstring.h,v 1.43 2005/04/25 19:24:10 roberto Exp $
+** String table (keep all strings handled by Lua)
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lstring_h
+#define lstring_h
+
+
+#include "lgc.h"
+#include "lobject.h"
+#include "lstate.h"
+
+
+#define sizestring(s) (sizeof(union TString)+((s)->len+1)*sizeof(char))
+
+#define sizeudata(u) (sizeof(union Udata)+(u)->len)
+
+#define luaS_new(L, s) (luaS_newlstr(L, s, strlen(s)))
+#define luaS_newliteral(L, s) (luaS_newlstr(L, "" s, \
+ (sizeof(s)/sizeof(char))-1))
+
+#define luaS_fix(s) l_setbit((s)->tsv.marked, FIXEDBIT)
+
+LUAI_FUNC void luaS_resize (lua_State *L, int newsize);
+LUAI_FUNC Udata *luaS_newudata (lua_State *L, size_t s, Table *e);
+LUAI_FUNC TString *luaS_newlstr (lua_State *L, const char *str, size_t l);
+
+
+#endif
diff --git a/deps/lua/src/lstrlib.c b/deps/lua/src/lstrlib.c
new file mode 100644
index 0000000000000000000000000000000000000000..84478fd106c6e96203d68db4e642302d1fba745a
--- /dev/null
+++ b/deps/lua/src/lstrlib.c
@@ -0,0 +1,863 @@
+/*
+** $Id: lstrlib.c,v 1.130 2005/12/29 15:32:11 roberto Exp $
+** Standard library for string operations and pattern-matching
+** See Copyright Notice in lua.h
+*/
+
+
+#include <ctype.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lstrlib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+/* macro to `unsign' a character */
+#define uchar(c) ((unsigned char)(c))
+
+
+
+static int str_len (lua_State *L) {
+ size_t l;
+ luaL_checklstring(L, 1, &l);
+ lua_pushinteger(L, l);
+ return 1;
+}
+
+
+static ptrdiff_t posrelat (ptrdiff_t pos, size_t len) {
+ /* relative string position: negative means back from end */
+ return (pos>=0) ? pos : (ptrdiff_t)len+pos+1;
+}
+
+
+static int str_sub (lua_State *L) {
+ size_t l;
+ const char *s = luaL_checklstring(L, 1, &l);
+ ptrdiff_t start = posrelat(luaL_checkinteger(L, 2), l);
+ ptrdiff_t end = posrelat(luaL_optinteger(L, 3, -1), l);
+ if (start < 1) start = 1;
+ if (end > (ptrdiff_t)l) end = (ptrdiff_t)l;
+ if (start <= end)
+ lua_pushlstring(L, s+start-1, end-start+1);
+ else lua_pushliteral(L, "");
+ return 1;
+}
+
+
+static int str_reverse (lua_State *L) {
+ size_t l;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ luaL_buffinit(L, &b);
+ while (l--) luaL_addchar(&b, s[l]);
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_lower (lua_State *L) {
+ size_t l;
+ size_t i;
+ luaL_Buffer b;
+ const char *s = luaL_checklstring(L, 1, &l);
+ luaL_buffinit(L, &b);
+ for (i=0; i<l; i++)
+ luaL_addchar(&b, tolower(uchar(s[i])));
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static int str_byte (lua_State *L) {
+ size_t l;
+ const char *s = luaL_checklstring(L, 1, &l);
+ ptrdiff_t posi = posrelat(luaL_optinteger(L, 2, 1), l);
+ ptrdiff_t pose = posrelat(luaL_optinteger(L, 3, posi), l);
+ int n, i;
+ if (posi <= 0) posi = 1;
+ if ((size_t)pose > l) pose = l;
+ if (posi > pose) return 0; /* empty interval; return no values */
+ n = (int)(pose - posi + 1);
+ if (posi + n <= pose) /* overflow? */
+ luaL_error(L, "string slice too long");
+ luaL_checkstack(L, n, "string slice too long");
+ for (i=0; i<n; i++)
+ lua_pushinteger(L, uchar(s[posi+i-1]));
+ return n;
+}
+
+
+static int check_capture (MatchState *ms, int l) {
+ l -= '1';
+ if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
+ return luaL_error(ms->L, "invalid capture index");
+ return l;
+}
+
+
+static int capture_to_close (MatchState *ms) {
+ int level = ms->level;
+ for (level--; level>=0; level--)
+ if (ms->capture[level].len == CAP_UNFINISHED) return level;
+ return luaL_error(ms->L, "invalid pattern capture");
+}
+
+
+static const char *classend (MatchState *ms, const char *p) {
+ switch (*p++) {
+ case L_ESC: {
+ if (*p == '\0')
+ luaL_error(ms->L, "malformed pattern (ends with " LUA_QL("%%") ")");
+ return p+1;
+ }
+ case '[': {
+ if (*p == '^') p++;
+ do { /* look for a `]' */
+ if (*p == '\0')
+ luaL_error(ms->L, "malformed pattern (missing " LUA_QL("]") ")");
+ if (*(p++) == L_ESC && *p != '\0')
+ p++; /* skip escapes (e.g. `%]') */
+ } while (*p != ']');
+ return p+1;
+ }
+ default: {
+ return p;
+ }
+ }
+}
+
+
+static int match_class (int c, int cl) {
+ int res;
+ switch (tolower(cl)) {
+ case 'a' : res = isalpha(c); break;
+ case 'c' : res = iscntrl(c); break;
+ case 'd' : res = isdigit(c); break;
+ case 'l' : res = islower(c); break;
+ case 'p' : res = ispunct(c); break;
+ case 's' : res = isspace(c); break;
+ case 'u' : res = isupper(c); break;
+ case 'w' : res = isalnum(c); break;
+ case 'x' : res = isxdigit(c); break;
+ case 'z' : res = (c == 0); break;
+ default: return (cl == c);
+ }
+ return (islower(cl) ? res : !res);
+}
+
+
+static int matchbracketclass (int c, const char *p, const char *ec) {
+ int sig = 1;
+ if (*(p+1) == '^') {
+ sig = 0;
+ p++; /* skip the `^' */
+ }
+ while (++p < ec) {
+ if (*p == L_ESC) {
+ p++;
+ if (match_class(c, uchar(*p)))
+ return sig;
+ }
+ else if ((*(p+1) == '-') && (p+2 < ec)) {
+ p+=2;
+ if (uchar(*(p-2)) <= c && c <= uchar(*p))
+ return sig;
+ }
+ else if (uchar(*p) == c) return sig;
+ }
+ return !sig;
+}
+
+
+static int singlematch (int c, const char *p, const char *ep) {
+ switch (*p) {
+ case '.': return 1; /* matches any char */
+ case L_ESC: return match_class(c, uchar(*(p+1)));
+ case '[': return matchbracketclass(c, p, ep-1);
+ default: return (uchar(*p) == c);
+ }
+}
+
+
+static const char *match (MatchState *ms, const char *s, const char *p);
+
+
+static const char *matchbalance (MatchState *ms, const char *s,
+ const char *p) {
+ if (*p == 0 || *(p+1) == 0)
+ luaL_error(ms->L, "unbalanced pattern");
+ if (*s != *p) return NULL;
+ else {
+ int b = *p;
+ int e = *(p+1);
+ int cont = 1;
+ while (++s < ms->src_end) {
+ if (*s == e) {
+ if (--cont == 0) return s+1;
+ }
+ else if (*s == b) cont++;
+ }
+ }
+ return NULL; /* string ends out of balance */
+}
+
+
+static const char *max_expand (MatchState *ms, const char *s,
+ const char *p, const char *ep) {
+ ptrdiff_t i = 0; /* counts maximum expand for item */
+ while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
+ i++;
+ /* keeps trying to match with the maximum repetitions */
+ while (i>=0) {
+ const char *res = match(ms, (s+i), ep+1);
+ if (res) return res;
+ i--; /* else didn't match; reduce 1 repetition to try again */
+ }
+ return NULL;
+}
+
+
+static const char *min_expand (MatchState *ms, const char *s,
+ const char *p, const char *ep) {
+ for (;;) {
+ const char *res = match(ms, s, ep+1);
+ if (res != NULL)
+ return res;
+ else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
+ s++; /* try with one more repetition */
+ else return NULL;
+ }
+}
+
+
+static const char *start_capture (MatchState *ms, const char *s,
+ const char *p, int what) {
+ const char *res;
+ int level = ms->level;
+ if (level >= LUA_MAXCAPTURES) luaL_error(ms->L, "too many captures");
+ ms->capture[level].init = s;
+ ms->capture[level].len = what;
+ ms->level = level+1;
+ if ((res=match(ms, s, p)) == NULL) /* match failed? */
+ ms->level--; /* undo capture */
+ return res;
+}
+
+
+static const char *end_capture (MatchState *ms, const char *s,
+ const char *p) {
+ int l = capture_to_close(ms);
+ const char *res;
+ ms->capture[l].len = s - ms->capture[l].init; /* close capture */
+ if ((res = match(ms, s, p)) == NULL) /* match failed? */
+ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
+ return res;
+}
+
+
+static const char *match_capture (MatchState *ms, const char *s, int l) {
+ size_t len;
+ l = check_capture(ms, l);
+ len = ms->capture[l].len;
+ if ((size_t)(ms->src_end-s) >= len &&
+ memcmp(ms->capture[l].init, s, len) == 0)
+ return s+len;
+ else return NULL;
+}
+
+
+static const char *match (MatchState *ms, const char *s, const char *p) {
+ init: /* using goto's to optimize tail recursion */
+ switch (*p) {
+ case '(': { /* start capture */
+ if (*(p+1) == ')') /* position capture? */
+ return start_capture(ms, s, p+2, CAP_POSITION);
+ else
+ return start_capture(ms, s, p+1, CAP_UNFINISHED);
+ }
+ case ')': { /* end capture */
+ return end_capture(ms, s, p+1);
+ }
+ case L_ESC: {
+ switch (*(p+1)) {
+ case 'b': { /* balanced string? */
+ s = matchbalance(ms, s, p+2);
+ if (s == NULL) return NULL;
+ p+=4; goto init; /* else return match(ms, s, p+4); */
+ }
+ case 'f': { /* frontier? */
+ const char *ep; char previous;
+ p += 2;
+ if (*p != '[')
+ luaL_error(ms->L, "missing " LUA_QL("[") " after "
+ LUA_QL("%%f") " in pattern");
+ ep = classend(ms, p); /* points to what is next */
+ previous = (s == ms->src_init) ? '\0' : *(s-1);
+ if (matchbracketclass(uchar(previous), p, ep-1) ||
+ !matchbracketclass(uchar(*s), p, ep-1)) return NULL;
+ p=ep; goto init; /* else return match(ms, s, ep); */
+ }
+ default: {
+ if (isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
+ s = match_capture(ms, s, uchar(*(p+1)));
+ if (s == NULL) return NULL;
+ p+=2; goto init; /* else return match(ms, s, p+2) */
+ }
+ goto dflt; /* case default */
+ }
+ }
+ }
+ case '\0': { /* end of pattern */
+ return s; /* match succeeded */
+ }
+ case '$': {
+ if (*(p+1) == '\0') /* is the `$' the last char in pattern? */
+ return (s == ms->src_end) ? s : NULL; /* check end of string */
+ else goto dflt;
+ }
+ default: dflt: { /* it is a pattern item */
+ const char *ep = classend(ms, p); /* points to what is next */
+ int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
+ switch (*ep) {
+ case '?': { /* optional */
+ const char *res;
+ if (m && ((res=match(ms, s+1, ep+1)) != NULL))
+ return res;
+ p=ep+1; goto init; /* else return match(ms, s, ep+1); */
+ }
+ case '*': { /* 0 or more repetitions */
+ return max_expand(ms, s, p, ep);
+ }
+ case '+': { /* 1 or more repetitions */
+ return (m ? max_expand(ms, s+1, p, ep) : NULL);
+ }
+ case '-': { /* 0 or more repetitions (minimum) */
+ return min_expand(ms, s, p, ep);
+ }
+ default: {
+ if (!m) return NULL;
+ s++; p=ep; goto init; /* else return match(ms, s+1, ep); */
+ }
+ }
+ }
+ }
+}
+
+
+
+static const char *lmemfind (const char *s1, size_t l1,
+ const char *s2, size_t l2) {
+ if (l2 == 0) return s1; /* empty strings are everywhere */
+ else if (l2 > l1) return NULL; /* avoids a negative `l1' */
+ else {
+ const char *init; /* to search for a `*s2' inside `s1' */
+ l2--; /* 1st char will be checked by `memchr' */
+ l1 = l1-l2; /* `s2' cannot be found after that */
+ while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
+ init++; /* 1st char is already checked */
+ if (memcmp(init, s2+1, l2) == 0)
+ return init-1;
+ else { /* correct `l1' and `s1' to try again */
+ l1 -= init-s1;
+ s1 = init;
+ }
+ }
+ return NULL; /* not found */
+ }
+}
+
+
+static void push_onecapture (MatchState *ms, int i, const char *s,
+ const char *e) {
+ if (i >= ms->level) {
+ if (i == 0) /* ms->level == 0, too */
+ lua_pushlstring(ms->L, s, e - s); /* add whole match */
+ else
+ luaL_error(ms->L, "invalid capture index");
+ }
+ else {
+ ptrdiff_t l = ms->capture[i].len;
+ if (l == CAP_UNFINISHED) luaL_error(ms->L, "unfinished capture");
+ if (l == CAP_POSITION)
+ lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
+ else
+ lua_pushlstring(ms->L, ms->capture[i].init, l);
+ }
+}
+
+
+static int push_captures (MatchState *ms, const char *s, const char *e) {
+ int i;
+ int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
+ luaL_checkstack(ms->L, nlevels, "too many captures");
+ for (i = 0; i < nlevels; i++)
+ push_onecapture(ms, i, s, e);
+ return nlevels; /* number of strings pushed */
+}
+
+
+static int str_find_aux (lua_State *L, int find) {
+ size_t l1, l2;
+ const char *s = luaL_checklstring(L, 1, &l1);
+ const char *p = luaL_checklstring(L, 2, &l2);
+ ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
+ if (init < 0) init = 0;
+ else if ((size_t)(init) > l1) init = (ptrdiff_t)l1;
+ if (find && (lua_toboolean(L, 4) || /* explicit request? */
+ strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
+ /* do a plain search */
+ const char *s2 = lmemfind(s+init, l1-init, p, l2);
+ if (s2) {
+ lua_pushinteger(L, s2-s+1);
+ lua_pushinteger(L, s2-s+l2);
+ return 2;
+ }
+ }
+ else {
+ MatchState ms;
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ const char *s1=s+init;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+l1;
+ do {
+ const char *res;
+ ms.level = 0;
+ if ((res=match(&ms, s1, p)) != NULL) {
+ if (find) {
+ lua_pushinteger(L, s1-s+1); /* start */
+ lua_pushinteger(L, res-s); /* end */
+ return push_captures(&ms, NULL, 0) + 2;
+ }
+ else
+ return push_captures(&ms, s1, res);
+ }
+ } while (s1++ < ms.src_end && !anchor);
+ }
+ lua_pushnil(L); /* not found */
+ return 1;
+}
+
+
+static int str_find (lua_State *L) {
+ return str_find_aux(L, 1);
+}
+
+
+static int str_match (lua_State *L) {
+ return str_find_aux(L, 0);
+}
+
+
+static int gmatch_aux (lua_State *L) {
+ MatchState ms;
+ size_t ls;
+ const char *s = lua_tolstring(L, lua_upvalueindex(1), &ls);
+ const char *p = lua_tostring(L, lua_upvalueindex(2));
+ const char *src;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+ls;
+ for (src = s + (size_t)lua_tointeger(L, lua_upvalueindex(3));
+ src <= ms.src_end;
+ src++) {
+ const char *e;
+ ms.level = 0;
+ if ((e = match(&ms, src, p)) != NULL) {
+ lua_Integer newstart = e-s;
+ if (e == src) newstart++; /* empty match? go at least one position */
+ lua_pushinteger(L, newstart);
+ lua_replace(L, lua_upvalueindex(3));
+ return push_captures(&ms, src, e);
+ }
+ }
+ return 0; /* not found */
+}
+
+
+static int gmatch (lua_State *L) {
+ luaL_checkstring(L, 1);
+ luaL_checkstring(L, 2);
+ lua_settop(L, 2);
+ lua_pushinteger(L, 0);
+ lua_pushcclosure(L, gmatch_aux, 3);
+ return 1;
+}
+
+
+static int gfind_nodef (lua_State *L) {
+ return luaL_error(L, LUA_QL("string.gfind") " was renamed to "
+ LUA_QL("string.gmatch"));
+}
+
+
+static void add_s (MatchState *ms, luaL_Buffer *b, const char *s,
+ const char *e) {
+ size_t l, i;
+ const char *news = lua_tolstring(ms->L, 3, &l);
+ for (i = 0; i < l; i++) {
+ if (news[i] != L_ESC)
+ luaL_addchar(b, news[i]);
+ else {
+ i++; /* skip ESC */
+ if (!isdigit(uchar(news[i])))
+ luaL_addchar(b, news[i]);
+ else if (news[i] == '0')
+ luaL_addlstring(b, s, e - s);
+ else {
+ push_onecapture(ms, news[i] - '1', s, e);
+ luaL_addvalue(b); /* add capture to accumulated result */
+ }
+ }
+ }
+}
+
+
+static void add_value (MatchState *ms, luaL_Buffer *b, const char *s,
+ const char *e) {
+ lua_State *L = ms->L;
+ switch (lua_type(L, 3)) {
+ case LUA_TNUMBER:
+ case LUA_TSTRING: {
+ add_s(ms, b, s, e);
+ return;
+ }
+ case LUA_TFUNCTION: {
+ int n;
+ lua_pushvalue(L, 3);
+ n = push_captures(ms, s, e);
+ lua_call(L, n, 1);
+ break;
+ }
+ case LUA_TTABLE: {
+ push_onecapture(ms, 0, s, e);
+ lua_gettable(L, 3);
+ break;
+ }
+ default: {
+ luaL_argerror(L, 3, "string/function/table expected");
+ return;
+ }
+ }
+ if (!lua_toboolean(L, -1)) { /* nil or false? */
+ lua_pop(L, 1);
+ lua_pushlstring(L, s, e - s); /* keep original text */
+ }
+ else if (!lua_isstring(L, -1))
+ luaL_error(L, "invalid replacement value (a %s)", luaL_typename(L, -1));
+ luaL_addvalue(b); /* add result to accumulator */
+}
+
+
+static int str_gsub (lua_State *L) {
+ size_t srcl;
+ const char *src = luaL_checklstring(L, 1, &srcl);
+ const char *p = luaL_checkstring(L, 2);
+ int max_s = luaL_optint(L, 4, srcl+1);
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ int n = 0;
+ MatchState ms;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ ms.L = L;
+ ms.src_init = src;
+ ms.src_end = src+srcl;
+ while (n < max_s) {
+ const char *e;
+ ms.level = 0;
+ e = match(&ms, src, p);
+ if (e) {
+ n++;
+ add_value(&ms, &b, src, e);
+ }
+ if (e && e>src) /* non empty match? */
+ src = e; /* skip it */
+ else if (src < ms.src_end)
+ luaL_addchar(&b, *src++);
+ else break;
+ if (anchor) break;
+ }
+ luaL_addlstring(&b, src, ms.src_end-src);
+ luaL_pushresult(&b);
+ lua_pushinteger(L, n); /* number of substitutions */
+ return 2;
+}
+
+/* }====================================================== */
+
+
+/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
+#define MAX_ITEM 512
+/* valid flags in a format specification */
+#define FLAGS "-+ #0"
+/*
+** maximum size of each format specification (such as '%-099.99d')
+** (+10 accounts for %99.99x plus margin of error)
+*/
+#define MAX_FORMAT (sizeof(FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
+
+
+static void addquoted (lua_State *L, luaL_Buffer *b, int arg) {
+ size_t l;
+ const char *s = luaL_checklstring(L, arg, &l);
+ luaL_addchar(b, '"');
+ while (l--) {
+ switch (*s) {
+ case '"': case '\\': case '\n': {
+ luaL_addchar(b, '\\');
+ luaL_addchar(b, *s);
+ break;
+ }
+ case '\0': {
+ luaL_addlstring(b, "\\000", 4);
+ break;
+ }
+ default: {
+ luaL_addchar(b, *s);
+ break;
+ }
+ }
+ s++;
+ }
+ luaL_addchar(b, '"');
+}
+
+static const char *scanformat (lua_State *L, const char *strfrmt, char *form) {
+ const char *p = strfrmt;
+ while (strchr(FLAGS, *p)) p++; /* skip flags */
+ if ((size_t)(p - strfrmt) >= sizeof(FLAGS))
+ luaL_error(L, "invalid format (repeated flags)");
+ if (isdigit(uchar(*p))) p++; /* skip width */
+ if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ if (*p == '.') {
+ p++;
+ if (isdigit(uchar(*p))) p++; /* skip precision */
+ if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ }
+ if (isdigit(uchar(*p)))
+ luaL_error(L, "invalid format (width or precision too long)");
+ *(form++) = '%';
+ strncpy(form, strfrmt, p - strfrmt + 1);
+ form += p - strfrmt + 1;
+ *form = '\0';
+ return p;
+}
+
+
+static void addintlen (char *form) {
+ size_t l = strlen(form);
+ char spec = form[l - 1];
+ strcpy(form + l - 1, LUA_INTFRMLEN);
+ form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
+ form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
+}
+
+
+static int str_format (lua_State *L) {
+ int arg = 1;
+ size_t sfl;
+ const char *strfrmt = luaL_checklstring(L, arg, &sfl);
+ const char *strfrmt_end = strfrmt+sfl;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while (strfrmt < strfrmt_end) {
+ if (*strfrmt != L_ESC)
+ luaL_addchar(&b, *strfrmt++);
+ else if (*++strfrmt == L_ESC)
+ luaL_addchar(&b, *strfrmt++); /* %% */
+ else { /* format item */
+ char form[MAX_FORMAT]; /* to store the format (`%...') */
+ char buff[MAX_ITEM]; /* to store the formatted item */
+ arg++;
+ strfrmt = scanformat(L, strfrmt, form);
+ switch (*strfrmt++) {
+ case 'c': {
+ sprintf(buff, form, (int)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'd': case 'i': {
+ addintlen(form);
+ sprintf(buff, form, (LUA_INTFRM_T)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'o': case 'u': case 'x': case 'X': {
+ addintlen(form);
+ sprintf(buff, form, (unsigned LUA_INTFRM_T)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'e': case 'E': case 'f':
+ case 'g': case 'G': {
+ sprintf(buff, form, (double)luaL_checknumber(L, arg));
+ break;
+ }
+ case 'q': {
+ addquoted(L, &b, arg);
+ continue; /* skip the 'addsize' at the end */
+ }
+ case 's': {
+ size_t l;
+ const char *s = luaL_checklstring(L, arg, &l);
+ if (!strchr(form, '.') && l >= 100) {
+ /* no precision and string is too long to be formatted;
+ keep original string */
+ lua_pushvalue(L, arg);
+ luaL_addvalue(&b);
+ continue; /* skip the `addsize' at the end */
+ }
+ else {
+ sprintf(buff, form, s);
+ break;
+ }
+ }
+ default: { /* also treat cases `pnLlh' */
+ return luaL_error(L, "invalid option to " LUA_QL("format"));
+ }
+ }
+ luaL_addlstring(&b, buff, strlen(buff));
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+
+static const luaL_Reg strlib[] = {
+ {"byte", str_byte},
+ {"char", str_char},
+ {"dump", str_dump},
+ {"find", str_find},
+ {"format", str_format},
+ {"gfind", gfind_nodef},
+ {"gmatch", gmatch},
+ {"gsub", str_gsub},
+ {"len", str_len},
+ {"lower", str_lower},
+ {"match", str_match},
+ {"rep", str_rep},
+ {"reverse", str_reverse},
+ {"sub", str_sub},
+ {"upper", str_upper},
+ {NULL, NULL}
+};
+
+
+static void createmetatable (lua_State *L) {
+ lua_createtable(L, 0, 1); /* create metatable for strings */
+ lua_pushliteral(L, ""); /* dummy string */
+ lua_pushvalue(L, -2);
+ lua_setmetatable(L, -2); /* set string metatable */
+ lua_pop(L, 1); /* pop dummy string */
+ lua_pushvalue(L, -2); /* string library... */
+ lua_setfield(L, -2, "__index"); /* ...is the __index metamethod */
+ lua_pop(L, 1); /* pop metatable */
+}
+
+
+/*
+** Open string library
+*/
+LUALIB_API int luaopen_string (lua_State *L) {
+ luaL_register(L, LUA_STRLIBNAME, strlib);
+#if defined(LUA_COMPAT_GFIND)
+ lua_getfield(L, -1, "gmatch");
+ lua_setfield(L, -2, "gfind");
+#endif
+ createmetatable(L);
+ return 1;
+}
+
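These functions are normally reached from Lua code, but a host can also call them back through the stack API. A small fragment calling string.gsub from C (it assumes an already-open lua_State *L with the string library registered and <stdio.h> included; not part of the patch):

    /* replace every "o" in "hello world" with "0" */
    lua_getglobal(L, "string");
    lua_getfield(L, -1, "gsub");
    lua_pushstring(L, "hello world");
    lua_pushstring(L, "o");
    lua_pushstring(L, "0");
    if (lua_pcall(L, 3, 2, 0) != 0) {
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
      lua_pop(L, 2);   /* error message and the `string' table */
    }
    else {
      /* str_gsub returns the new string plus the substitution count */
      printf("%s (%d)\n", lua_tostring(L, -2), (int)lua_tointeger(L, -1));
      lua_pop(L, 3);   /* two results and the `string' table */
    }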
diff --git a/deps/lua/src/ltable.c b/deps/lua/src/ltable.c
new file mode 100644
index 0000000000000000000000000000000000000000..bc91cacd637fe4563637f8c02740dc57a9cf50de
--- /dev/null
+++ b/deps/lua/src/ltable.c
@@ -0,0 +1,588 @@
+/*
+** $Id: ltable.c,v 2.32 2006/01/18 11:49:02 roberto Exp $
+** Lua tables (hash)
+** See Copyright Notice in lua.h
+*/
+
+
+/*
+** Implementation of tables (aka arrays, objects, or hash tables).
+** Tables keep their elements in two parts: an array part and a hash part.
+** Non-negative integer keys are all candidates to be kept in the array
+** part. The actual size of the array is the largest `n' such that at
+** least half the slots between 0 and n are in use.
+** Hash uses a mix of chained scatter table with Brent's variation.
+** A main invariant of these tables is that, if an element is not
+** in its main position (i.e. the `original' position that its hash gives
+** to it), then the colliding element is in its own main position.
+** Hence even when the load factor reaches 100%, performance remains good.
+*/
+
+#include <math.h>
+#include <string.h>
+
+#define ltable_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lgc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lstate.h"
+#include "ltable.h"
+
+
+/*
+** max size of array part is 2^MAXBITS
+*/
+#if LUAI_BITSINT > 26
+#define MAXBITS 26
+#else
+#define MAXBITS (LUAI_BITSINT-2)
+#endif
+
+#define MAXASIZE (1 << MAXBITS)
+
+
+#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
+
+#define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
+#define hashboolean(t,p) hashpow2(t, p)
+
+
+/*
+** for some types, it is better to avoid modulus by power of 2, as
+** they tend to have many 2 factors.
+*/
+#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
+
+
+#define hashpointer(t,p) hashmod(t, IntPoint(p))
+
+
+/*
+** number of ints inside a lua_Number
+*/
+#define numints cast_int(sizeof(lua_Number)/sizeof(int))
+
+
+
+#define dummynode (&dummynode_)
+
+static const Node dummynode_ = {
+ {{NULL}, LUA_TNIL}, /* value */
+ {{{NULL}, LUA_TNIL, NULL}} /* key */
+};
+
+
+/*
+** hash for lua_Numbers
+*/
+static Node *hashnum (const Table *t, lua_Number n) {
+ unsigned int a[numints];
+ int i;
+ n += 1; /* normalize number (avoid -0) */
+ lua_assert(sizeof(a) <= sizeof(n));
+ memcpy(a, &n, sizeof(a));
+ for (i = 1; i < numints; i++) a[0] += a[i];
+ return hashmod(t, a[0]);
+}
+
+
+
+/*
+** returns the `main' position of an element in a table (that is, the index
+** of its hash value)
+*/
+static Node *mainposition (const Table *t, const TValue *key) {
+ switch (ttype(key)) {
+ case LUA_TNUMBER:
+ return hashnum(t, nvalue(key));
+ case LUA_TSTRING:
+ return hashstr(t, rawtsvalue(key));
+ case LUA_TBOOLEAN:
+ return hashboolean(t, bvalue(key));
+ case LUA_TLIGHTUSERDATA:
+ return hashpointer(t, pvalue(key));
+ default:
+ return hashpointer(t, gcvalue(key));
+ }
+}
+
+
+/*
+** returns the index for `key' if `key' is an appropriate key to live in
+** the array part of the table, -1 otherwise.
+*/
+static int arrayindex (const TValue *key) {
+ if (ttisnumber(key)) {
+ lua_Number n = nvalue(key);
+ int k;
+ lua_number2int(k, n);
+ if (luai_numeq(cast_num(k), n))
+ return k;
+ }
+ return -1; /* `key' did not match some condition */
+}
+
+
+/*
+** returns the index of a `key' for table traversals. First goes all
+** elements in the array part, then elements in the hash part. The
+** beginning of a traversal is signalled by -1.
+*/
+static int findindex (lua_State *L, Table *t, StkId key) {
+ int i;
+ if (ttisnil(key)) return -1; /* first iteration */
+ i = arrayindex(key);
+ if (0 < i && i <= t->sizearray) /* is `key' inside array part? */
+ return i-1; /* yes; that's the index (corrected to C) */
+ else {
+ Node *n = mainposition(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ /* key may be dead already, but it is ok to use it in `next' */
+ if (luaO_rawequalObj(key2tval(n), key) ||
+ (ttype(gkey(n)) == LUA_TDEADKEY && iscollectable(key) &&
+ gcvalue(gkey(n)) == gcvalue(key))) {
+ i = cast_int(n - gnode(t, 0)); /* key index in hash table */
+ /* hash elements are numbered after array ones */
+ return i + t->sizearray;
+ }
+ else n = gnext(n);
+ } while (n);
+ luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */
+ return 0; /* to avoid warnings */
+ }
+}
+
+
+int luaH_next (lua_State *L, Table *t, StkId key) {
+ int i = findindex(L, t, key); /* find original element */
+ for (i++; i < t->sizearray; i++) { /* try first array part */
+ if (!ttisnil(&t->array[i])) { /* a non-nil value? */
+ setnvalue(key, cast_num(i+1));
+ setobj2s(L, key+1, &t->array[i]);
+ return 1;
+ }
+ }
+ for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */
+ if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? */
+ setobj2s(L, key, key2tval(gnode(t, i)));
+ setobj2s(L, key+1, gval(gnode(t, i)));
+ return 1;
+ }
+ }
+ return 0; /* no more elements */
+}
+
+
+/*
+** {=============================================================
+** Rehash
+** ==============================================================
+*/
+
+
+static int computesizes (int nums[], int *narray) {
+ int i;
+ int twotoi; /* 2^i */
+ int a = 0; /* number of elements smaller than 2^i */
+ int na = 0; /* number of elements to go to array part */
+ int n = 0; /* optimal size for array part */
+ for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) {
+ if (nums[i] > 0) {
+ a += nums[i];
+ if (a > twotoi/2) { /* more than half elements present? */
+ n = twotoi; /* optimal size (till now) */
+ na = a; /* all elements smaller than n will go to array part */
+ }
+ }
+ if (a == *narray) break; /* all elements already counted */
+ }
+ *narray = n;
+ lua_assert(*narray/2 <= na && na <= *narray);
+ return na;
+}
+
+
+static int countint (const TValue *key, int *nums) {
+ int k = arrayindex(key);
+ if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */
+ nums[ceillog2(k)]++; /* count as such */
+ return 1;
+ }
+ else
+ return 0;
+}
+
+
+static int numusearray (const Table *t, int *nums) {
+ int lg;
+ int ttlg; /* 2^lg */
+ int ause = 0; /* summation of `nums' */
+ int i = 1; /* count to traverse all array keys */
+ for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */
+ int lc = 0; /* counter */
+ int lim = ttlg;
+ if (lim > t->sizearray) {
+ lim = t->sizearray; /* adjust upper limit */
+ if (i > lim)
+ break; /* no more elements to count */
+ }
+ /* count elements in range (2^(lg-1), 2^lg] */
+ for (; i <= lim; i++) {
+ if (!ttisnil(&t->array[i-1]))
+ lc++;
+ }
+ nums[lg] += lc;
+ ause += lc;
+ }
+ return ause;
+}
+
+
+static int numusehash (const Table *t, int *nums, int *pnasize) {
+ int totaluse = 0; /* total number of elements */
+ int ause = 0; /* summation of `nums' */
+ int i = sizenode(t);
+ while (i--) {
+ Node *n = &t->node[i];
+ if (!ttisnil(gval(n))) {
+ ause += countint(key2tval(n), nums);
+ totaluse++;
+ }
+ }
+ *pnasize += ause;
+ return totaluse;
+}
+
+
+static void setarrayvector (lua_State *L, Table *t, int size) {
+ int i;
+ luaM_reallocvector(L, t->array, t->sizearray, size, TValue);
+ for (i=t->sizearray; i<size; i++)
+ setnilvalue(&t->array[i]);
+ t->sizearray = size;
+}
+
+
+static void setnodevector (lua_State *L, Table *t, int size) {
+ int lsize;
+ if (size == 0) { /* no elements to hash part? */
+ t->node = cast(Node *, dummynode); /* use common `dummynode' */
+ lsize = 0;
+ }
+ else {
+ int i;
+ lsize = ceillog2(size);
+ if (lsize > MAXBITS)
+ luaG_runerror(L, "table overflow");
+ size = twoto(lsize);
+ t->node = luaM_newvector(L, size, Node);
+ for (i=0; i<size; i++) {
+ Node *n = gnode(t, i);
+ gnext(n) = NULL;
+ setnilvalue(gkey(n));
+ setnilvalue(gval(n));
+ }
+ }
+ t->lsizenode = cast_byte(lsize);
+ t->lastfree = gnode(t, size); /* all positions are free */
+}
+
+
+static void resize (lua_State *L, Table *t, int nasize, int nhsize) {
+ int i;
+ int oldasize = t->sizearray;
+ int oldhsize = t->lsizenode;
+ Node *nold = t->node; /* save old hash ... */
+ if (nasize > oldasize) /* array part must grow? */
+ setarrayvector(L, t, nasize);
+ /* create new hash part with appropriate size */
+ setnodevector(L, t, nhsize);
+ if (nasize < oldasize) { /* array part must shrink? */
+ t->sizearray = nasize;
+ /* re-insert elements from vanishing slice */
+ for (i=nasize; i<oldasize; i++) {
+ if (!ttisnil(&t->array[i]))
+ setobjt2t(L, luaH_setnum(L, t, i+1), &t->array[i]);
+ }
+ /* shrink array */
+ luaM_reallocvector(L, t->array, oldasize, nasize, TValue);
+ }
+ /* re-insert elements from hash part */
+ for (i = twoto(oldhsize) - 1; i >= 0; i--) {
+ Node *old = nold+i;
+ if (!ttisnil(gval(old)))
+ setobjt2t(L, luaH_set(L, t, key2tval(old)), gval(old));
+ }
+ if (nold != dummynode)
+ luaM_freearray(L, nold, twoto(oldhsize), Node); /* free old array */
+}
+
+
+void luaH_resizearray (lua_State *L, Table *t, int nasize) {
+ int nsize = (t->node == dummynode) ? 0 : sizenode(t);
+ resize(L, t, nasize, nsize);
+}
+
+
+static void rehash (lua_State *L, Table *t, const TValue *ek) {
+ int nasize, na;
+ int nums[MAXBITS+1]; /* nums[i] = number of keys between 2^(i-1) and 2^i */
+ int i;
+ int totaluse;
+ for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */
+ nasize = numusearray(t, nums); /* count keys in array part */
+ totaluse = nasize; /* all those keys are integer keys */
+ totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */
+ /* count extra key */
+ nasize += countint(ek, nums);
+ totaluse++;
+ /* compute new size for array part */
+ na = computesizes(nums, &nasize);
+ /* resize the table to new computed sizes */
+ resize(L, t, nasize, totaluse - na);
+}
+
+
+
+/*
+** }=============================================================
+*/
+
+
+Table *luaH_new (lua_State *L, int narray, int nhash) {
+ Table *t = luaM_new(L, Table);
+ luaC_link(L, obj2gco(t), LUA_TTABLE);
+ t->metatable = NULL;
+ t->flags = cast_byte(~0);
+ /* temporary values (kept only if some malloc fails) */
+ t->array = NULL;
+ t->sizearray = 0;
+ t->lsizenode = 0;
+ t->node = cast(Node *, dummynode);
+ setarrayvector(L, t, narray);
+ setnodevector(L, t, nhash);
+ return t;
+}
+
+
+void luaH_free (lua_State *L, Table *t) {
+ if (t->node != dummynode)
+ luaM_freearray(L, t->node, sizenode(t), Node);
+ luaM_freearray(L, t->array, t->sizearray, TValue);
+ luaM_free(L, t);
+}
+
+
+static Node *getfreepos (Table *t) {
+ while (t->lastfree-- > t->node) {
+ if (ttisnil(gkey(t->lastfree)))
+ return t->lastfree;
+ }
+ return NULL; /* could not find a free place */
+}
+
+
+
+/*
+** inserts a new key into a hash table; first, check whether key's main
+** position is free. If not, check whether colliding node is in its main
+** position or not: if it is not, move colliding node to an empty place and
+** put new key in its main position; otherwise (colliding node is in its main
+** position), new key goes to an empty position.
+*/
+static TValue *newkey (lua_State *L, Table *t, const TValue *key) {
+ Node *mp = mainposition(t, key);
+ if (!ttisnil(gval(mp)) || mp == dummynode) {
+ Node *othern;
+ Node *n = getfreepos(t); /* get a free place */
+ if (n == NULL) { /* cannot find a free place? */
+ rehash(L, t, key); /* grow table */
+ return luaH_set(L, t, key); /* re-insert key into grown table */
+ }
+ lua_assert(n != dummynode);
+ othern = mainposition(t, key2tval(mp));
+ if (othern != mp) { /* is colliding node out of its main position? */
+ /* yes; move colliding node into free position */
+ while (gnext(othern) != mp) othern = gnext(othern); /* find previous */
+ gnext(othern) = n; /* redo the chain with `n' in place of `mp' */
+ *n = *mp; /* copy colliding node into free pos. (mp->next also goes) */
+ gnext(mp) = NULL; /* now `mp' is free */
+ setnilvalue(gval(mp));
+ }
+ else { /* colliding node is in its own main position */
+ /* new node will go into free position */
+ gnext(n) = gnext(mp); /* chain new position */
+ gnext(mp) = n;
+ mp = n;
+ }
+ }
+ gkey(mp)->value = key->value; gkey(mp)->tt = key->tt;
+ luaC_barriert(L, t, key);
+ lua_assert(ttisnil(gval(mp)));
+ return gval(mp);
+}
+
+
+/*
+** search function for integers
+*/
+const TValue *luaH_getnum (Table *t, int key) {
+ /* (1 <= key && key <= t->sizearray) */
+ if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray))
+ return &t->array[key-1];
+ else {
+ lua_Number nk = cast_num(key);
+ Node *n = hashnum(t, nk);
+ do { /* check whether `key' is somewhere in the chain */
+ if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+ }
+}
+
+
+/*
+** search function for strings
+*/
+const TValue *luaH_getstr (Table *t, TString *key) {
+ Node *n = hashstr(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ if (ttisstring(gkey(n)) && rawtsvalue(gkey(n)) == key)
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+}
+
+
+/*
+** main search function
+*/
+const TValue *luaH_get (Table *t, const TValue *key) {
+ switch (ttype(key)) {
+ case LUA_TNIL: return luaO_nilobject;
+ case LUA_TSTRING: return luaH_getstr(t, rawtsvalue(key));
+ case LUA_TNUMBER: {
+ int k;
+ lua_Number n = nvalue(key);
+ lua_number2int(k, n);
+ if (luai_numeq(cast_num(k), nvalue(key))) /* index is int? */
+ return luaH_getnum(t, k); /* use specialized version */
+ /* else go through */
+ }
+ default: {
+ Node *n = mainposition(t, key);
+ do { /* check whether `key' is somewhere in the chain */
+ if (luaO_rawequalObj(key2tval(n), key))
+ return gval(n); /* that's it */
+ else n = gnext(n);
+ } while (n);
+ return luaO_nilobject;
+ }
+ }
+}
+
+
+TValue *luaH_set (lua_State *L, Table *t, const TValue *key) {
+ const TValue *p = luaH_get(t, key);
+ t->flags = 0;
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ if (ttisnil(key)) luaG_runerror(L, "table index is nil");
+ else if (ttisnumber(key) && luai_numisnan(nvalue(key)))
+ luaG_runerror(L, "table index is NaN");
+ return newkey(L, t, key);
+ }
+}
+
+
+TValue *luaH_setnum (lua_State *L, Table *t, int key) {
+ const TValue *p = luaH_getnum(t, key);
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ TValue k;
+ setnvalue(&k, cast_num(key));
+ return newkey(L, t, &k);
+ }
+}
+
+
+TValue *luaH_setstr (lua_State *L, Table *t, TString *key) {
+ const TValue *p = luaH_getstr(t, key);
+ if (p != luaO_nilobject)
+ return cast(TValue *, p);
+ else {
+ TValue k;
+ setsvalue(L, &k, key);
+ return newkey(L, t, &k);
+ }
+}
+
+
+static int unbound_search (Table *t, unsigned int j) {
+ unsigned int i = j; /* i is zero or a present index */
+ j++;
+ /* find `i' and `j' such that i is present and j is not */
+ while (!ttisnil(luaH_getnum(t, j))) {
+ i = j;
+ j *= 2;
+ if (j > cast(unsigned int, MAX_INT)) { /* overflow? */
+ /* table was built with bad purposes: resort to linear search */
+ i = 1;
+ while (!ttisnil(luaH_getnum(t, i))) i++;
+ return i - 1;
+ }
+ }
+ /* now do a binary search between them */
+ while (j - i > 1) {
+ unsigned int m = (i+j)/2;
+ if (ttisnil(luaH_getnum(t, m))) j = m;
+ else i = m;
+ }
+ return i;
+}
+
+
+/*
+** Try to find a boundary in table `t'. A `boundary' is an integer index
+** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
+*/
+int luaH_getn (Table *t) {
+ unsigned int j = t->sizearray;
+ if (j > 0 && ttisnil(&t->array[j - 1])) {
+ /* there is a boundary in the array part: (binary) search for it */
+ unsigned int i = 0;
+ while (j - i > 1) {
+ unsigned int m = (i+j)/2;
+ if (ttisnil(&t->array[m - 1])) j = m;
+ else i = m;
+ }
+ return i;
+ }
+ /* else must find a boundary in hash part */
+ else if (t->node == dummynode) /* hash part is empty? */
+ return j; /* that is easy... */
+ else return unbound_search(t, j);
+}
+
+
+
+#if defined(LUA_DEBUG)
+
+Node *luaH_mainposition (const Table *t, const TValue *key) {
+ return mainposition(t, key);
+}
+
+int luaH_isdummy (Node *n) { return n == dummynode; }
+
+#endif
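luaH_getn returns a `boundary': an index i with t[i] non-nil and t[i+1] nil. When the array part ends in nil it finds one by binary search instead of scanning linearly. The same search over a plain C array of presence flags, as an illustrative sketch (boundary is a hypothetical helper, not part of the patch):

    /* nonnil[k] != 0 means slot k+1 holds a non-nil value; caller guarantees slot j is nil */
    static unsigned int boundary (const unsigned char *nonnil, unsigned int j) {
      unsigned int i = 0;            /* invariant: slot i is non-nil (or i == 0) */
      while (j - i > 1) {            /* invariant: slot j is nil */
        unsigned int m = (i + j) / 2;
        if (nonnil[m - 1]) i = m;
        else j = m;
      }
      return i;                      /* slot i non-nil and slot i+1 nil */
    }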
diff --git a/deps/lua/src/ltable.h b/deps/lua/src/ltable.h
new file mode 100644
index 0000000000000000000000000000000000000000..09193cdbe048c9a19b8277f9c2bfae4af98591cf
--- /dev/null
+++ b/deps/lua/src/ltable.h
@@ -0,0 +1,40 @@
+/*
+** $Id: ltable.h,v 2.10 2006/01/10 13:13:06 roberto Exp $
+** Lua tables (hash)
+** See Copyright Notice in lua.h
+*/
+
+#ifndef ltable_h
+#define ltable_h
+
+#include "lobject.h"
+
+
+#define gnode(t,i) (&(t)->node[i])
+#define gkey(n) (&(n)->i_key.nk)
+#define gval(n) (&(n)->i_val)
+#define gnext(n) ((n)->i_key.nk.next)
+
+#define key2tval(n) (&(n)->i_key.tvk)
+
+
+LUAI_FUNC const TValue *luaH_getnum (Table *t, int key);
+LUAI_FUNC TValue *luaH_setnum (lua_State *L, Table *t, int key);
+LUAI_FUNC const TValue *luaH_getstr (Table *t, TString *key);
+LUAI_FUNC TValue *luaH_setstr (lua_State *L, Table *t, TString *key);
+LUAI_FUNC const TValue *luaH_get (Table *t, const TValue *key);
+LUAI_FUNC TValue *luaH_set (lua_State *L, Table *t, const TValue *key);
+LUAI_FUNC Table *luaH_new (lua_State *L, int narray, int lnhash);
+LUAI_FUNC void luaH_resizearray (lua_State *L, Table *t, int nasize);
+LUAI_FUNC void luaH_free (lua_State *L, Table *t);
+LUAI_FUNC int luaH_next (lua_State *L, Table *t, StkId key);
+LUAI_FUNC int luaH_getn (Table *t);
+
+
+#if defined(LUA_DEBUG)
+LUAI_FUNC Node *luaH_mainposition (const Table *t, const TValue *key);
+LUAI_FUNC int luaH_isdummy (Node *n);
+#endif
+
+
+#endif
diff --git a/deps/lua/src/ltablib.c b/deps/lua/src/ltablib.c
new file mode 100644
index 0000000000000000000000000000000000000000..453b23b378bc5a215905cc974a9d981827b989a8
--- /dev/null
+++ b/deps/lua/src/ltablib.c
@@ -0,0 +1,278 @@
+/*
+** $Id: ltablib.c,v 1.38 2005/10/23 17:38:15 roberto Exp $
+** Library for Table Manipulation
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stddef.h>
+
+#define ltablib_c
+#define LUA_LIB
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+#define aux_getn(L,n) (luaL_checktype(L, n, LUA_TTABLE), luaL_getn(L, n))
+
+
+static int foreachi (lua_State *L) {
+ int i;
+ int n = aux_getn(L, 1);
+ luaL_checktype(L, 2, LUA_TFUNCTION);
+ for (i=1; i <= n; i++) {
+ lua_pushvalue(L, 2); /* function */
+ lua_pushinteger(L, i); /* 1st argument */
+ lua_rawgeti(L, 1, i); /* 2nd argument */
+ lua_call(L, 2, 1);
+ if (!lua_isnil(L, -1))
+ return 1;
+ lua_pop(L, 1); /* remove nil result */
+ }
+ return 0;
+}
+
+
+static int foreach (lua_State *L) {
+ luaL_checktype(L, 1, LUA_TTABLE);
+ luaL_checktype(L, 2, LUA_TFUNCTION);
+ lua_pushnil(L); /* first key */
+ while (lua_next(L, 1)) {
+ lua_pushvalue(L, 2); /* function */
+ lua_pushvalue(L, -3); /* key */
+ lua_pushvalue(L, -3); /* value */
+ lua_call(L, 2, 1);
+ if (!lua_isnil(L, -1))
+ return 1;
+ lua_pop(L, 2); /* remove value and result */
+ }
+ return 0;
+}
+
+
+static int maxn (lua_State *L) {
+ lua_Number max = 0;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_pushnil(L); /* first key */
+ while (lua_next(L, 1)) {
+ lua_pop(L, 1); /* remove value */
+ if (lua_type(L, -1) == LUA_TNUMBER) {
+ lua_Number v = lua_tonumber(L, -1);
+ if (v > max) max = v;
+ }
+ }
+ lua_pushnumber(L, max);
+ return 1;
+}
+
+
+static int getn (lua_State *L) {
+ lua_pushinteger(L, aux_getn(L, 1));
+ return 1;
+}
+
+
+static int setn (lua_State *L) {
+ luaL_checktype(L, 1, LUA_TTABLE);
+#ifndef luaL_setn
+ luaL_setn(L, 1, luaL_checkint(L, 2));
+#else
+ luaL_error(L, LUA_QL("setn") " is obsolete");
+#endif
+ lua_pushvalue(L, 1);
+ return 1;
+}
+
+
+static int tinsert (lua_State *L) {
+ int e = aux_getn(L, 1) + 1; /* first empty element */
+ int pos; /* where to insert new element */
+ switch (lua_gettop(L)) {
+ case 2: { /* called with only 2 arguments */
+ pos = e; /* insert new element at the end */
+ break;
+ }
+ case 3: {
+ int i;
+ pos = luaL_checkint(L, 2); /* 2nd argument is the position */
+ if (pos > e) e = pos; /* `grow' array if necessary */
+ for (i = e; i > pos; i--) { /* move up elements */
+ lua_rawgeti(L, 1, i-1);
+ lua_rawseti(L, 1, i); /* t[i] = t[i-1] */
+ }
+ break;
+ }
+ default: {
+ return luaL_error(L, "wrong number of arguments to " LUA_QL("insert"));
+ }
+ }
+ luaL_setn(L, 1, e); /* new size */
+ lua_rawseti(L, 1, pos); /* t[pos] = v */
+ return 0;
+}
+
+
+static int tremove (lua_State *L) {
+ int e = aux_getn(L, 1);
+ int pos = luaL_optint(L, 2, e);
+ if (e == 0) return 0; /* table is `empty' */
+ luaL_setn(L, 1, e - 1); /* t.n = n-1 */
+ lua_rawgeti(L, 1, pos); /* result = t[pos] */
+  for ( ;pos<e; pos++) {
+    lua_rawgeti(L, 1, pos+1);
+    lua_rawseti(L, 1, pos);  /* t[pos] = t[pos+1] */
+  }
+  lua_pushnil(L);
+  lua_rawseti(L, 1, e);  /* t[e] = nil */
+  return 1;
+}
+
+
+    /* part of auxsort, the quicksort partition loop used by table.sort */
+    for (;;) {  /* invariant: a[l..i] <= P <= a[j..u] */
+      /* repeat ++i until a[i] >= P */
+      while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+        if (i>u) luaL_error(L, "invalid order function for sorting");
+        lua_pop(L, 1);  /* remove a[i] */
+      }
+      /* repeat --j until a[j] <= P */
+      while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+        if (j<l) luaL_error(L, "invalid order function for sorting");
+        lua_pop(L, 1);  /* remove a[j] */
+      }
+      if (j<i) {
+        lua_pop(L, 3);  /* pop pivot, a[i], a[j] */
+        break;
+      }
+      set2(L, i, j);
+    }
diff --git a/deps/lua/src/ltm.c b/deps/lua/src/ltm.c
new file mode 100644
--- /dev/null
+++ b/deps/lua/src/ltm.c
+/*
+** Tag methods
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define ltm_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "lobject.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+
+
+
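+/* indexed by type tag: both LUA_TLIGHTUSERDATA and LUA_TUSERDATA are
+** reported as "userdata"; "proto" and "upval" name internal object
+** types that are never visible from Lua */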
+const char *const luaT_typenames[] = {
+ "nil", "boolean", "userdata", "number",
+ "string", "table", "function", "userdata", "thread",
+ "proto", "upval"
+};
+
+
+void luaT_init (lua_State *L) {
+ static const char *const luaT_eventname[] = { /* ORDER TM */
+ "__index", "__newindex",
+ "__gc", "__mode", "__eq",
+ "__add", "__sub", "__mul", "__div", "__mod",
+ "__pow", "__unm", "__len", "__lt", "__le",
+ "__concat", "__call"
+ };
+ int i;
+  for (i=0; i<TM_N; i++) {
+    G(L)->tmname[i] = luaS_new(L, luaT_eventname[i]);
+ luaS_fix(G(L)->tmname[i]); /* never collect these names */
+ }
+}
+
+
+/*
+** function to be used with macro "fasttm": optimized for absence of
+** tag methods
+*/
+const TValue *luaT_gettm (Table *events, TMS event, TString *ename) {
+ const TValue *tm = luaH_getstr(events, ename);
+ lua_assert(event <= TM_EQ);
+ if (ttisnil(tm)) { /* no tag method? */
+    events->flags |= cast_byte(1u<<event);  /* cache this fact */
+    return NULL;
+  }
+  else return tm;
+}
+
+
+const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, TMS event) {
+  Table *mt;
+  switch (ttype(o)) {
+    case LUA_TTABLE:
+      mt = hvalue(o)->metatable;
+ break;
+ case LUA_TUSERDATA:
+ mt = uvalue(o)->metatable;
+ break;
+ default:
+ mt = G(L)->mt[ttype(o)];
+ }
+ return (mt ? luaH_getstr(mt, G(L)->tmname[event]) : luaO_nilobject);
+}
+
diff --git a/deps/lua/src/ltm.h b/deps/lua/src/ltm.h
new file mode 100644
index 0000000000000000000000000000000000000000..866c79668d89673f350d37d0408b37bd94965448
--- /dev/null
+++ b/deps/lua/src/ltm.h
@@ -0,0 +1,54 @@
+/*
+** $Id: ltm.h,v 2.6 2005/06/06 13:30:25 roberto Exp $
+** Tag methods
+** See Copyright Notice in lua.h
+*/
+
+#ifndef ltm_h
+#define ltm_h
+
+
+#include "lobject.h"
+
+
+/*
+* WARNING: if you change the order of this enumeration,
+* grep "ORDER TM"
+*/
+typedef enum {
+ TM_INDEX,
+ TM_NEWINDEX,
+ TM_GC,
+ TM_MODE,
+ TM_EQ, /* last tag method with `fast' access */
+ TM_ADD,
+ TM_SUB,
+ TM_MUL,
+ TM_DIV,
+ TM_MOD,
+ TM_POW,
+ TM_UNM,
+ TM_LEN,
+ TM_LT,
+ TM_LE,
+ TM_CONCAT,
+ TM_CALL,
+ TM_N /* number of elements in the enum */
+} TMS;
+
+
+
+#define gfasttm(g,et,e) ((et) == NULL ? NULL : \
+ ((et)->flags & (1u<<(e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e]))
+
+#define fasttm(l,et,e) gfasttm(G(l), et, e)
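+/* a table's 'flags' byte caches which of the fast events (up to TM_EQ)
+** are known to be absent, so gfasttm can return NULL without looking
+** up the metatable again */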
+
+LUAI_DATA const char *const luaT_typenames[];
+
+
+LUAI_FUNC const TValue *luaT_gettm (Table *events, TMS event, TString *ename);
+LUAI_FUNC const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o,
+ TMS event);
+LUAI_FUNC void luaT_init (lua_State *L);
+
+#endif
diff --git a/deps/lua/src/lua.c b/deps/lua/src/lua.c
new file mode 100644
index 0000000000000000000000000000000000000000..6df527db30fbd04bd9597cb610622a7e17a5002c
--- /dev/null
+++ b/deps/lua/src/lua.c
@@ -0,0 +1,377 @@
+/*
+** $Id: lua.c,v 1.157 2005/12/29 16:23:32 roberto Exp $
+** Lua stand-alone interpreter
+** See Copyright Notice in lua.h
+*/
+
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lua_c
+
+#include "lua.h"
+
+#include "lauxlib.h"
+#include "lualib.h"
+
+
+
+static lua_State *globalL = NULL;
+
+static const char *progname = LUA_PROGNAME;
+
+
+
+static void lstop (lua_State *L, lua_Debug *ar) {
+ (void)ar; /* unused arg. */
+ lua_sethook(L, NULL, 0, 0);
+ luaL_error(L, "interrupted!");
+}
+
+
+static void laction (int i) {
+ signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
+ terminate process (default action) */
+ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
+}
+
+
+static void print_usage (void) {
+ fprintf(stderr,
+ "usage: %s [options] [script [args]].\n"
+ "Available options are:\n"
+ " -e stat execute string " LUA_QL("stat") "\n"
+ " -l name require library " LUA_QL("name") "\n"
+ " -i enter interactive mode after executing " LUA_QL("script") "\n"
+ " -v show version information\n"
+ " -- stop handling options\n"
+ " - execute stdin and stop handling options\n"
+ ,
+ progname);
+ fflush(stderr);
+}
+
+
+static void l_message (const char *pname, const char *msg) {
+ if (pname) fprintf(stderr, "%s: ", pname);
+ fprintf(stderr, "%s\n", msg);
+ fflush(stderr);
+}
+
+
+static int report (lua_State *L, int status) {
+ if (status && !lua_isnil(L, -1)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg == NULL) msg = "(error object is not a string)";
+ l_message(progname, msg);
+ lua_pop(L, 1);
+ }
+ return status;
+}
+
+
+static int traceback (lua_State *L) {
+ lua_getfield(L, LUA_GLOBALSINDEX, "debug");
+ if (!lua_istable(L, -1)) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ lua_getfield(L, -1, "traceback");
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2);
+ return 1;
+ }
+ lua_pushvalue(L, 1); /* pass error message */
+ lua_pushinteger(L, 2); /* skip this function and traceback */
+ lua_call(L, 2, 1); /* call debug.traceback */
+ return 1;
+}
+
+
+static int docall (lua_State *L, int narg, int clear) {
+ int status;
+ int base = lua_gettop(L) - narg; /* function index */
+ lua_pushcfunction(L, traceback); /* push traceback function */
+ lua_insert(L, base); /* put it under chunk and args */
+ signal(SIGINT, laction);
+ status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
+ signal(SIGINT, SIG_DFL);
+ lua_remove(L, base); /* remove traceback function */
+ /* force a complete garbage collection in case of errors */
+ if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0);
+ return status;
+}
+
+
+static void print_version (void) {
+ l_message(NULL, LUA_VERSION " " LUA_COPYRIGHT);
+}
+
+
+static int getargs (lua_State *L, char **argv, int n) {
+ int narg;
+ int i;
+ int argc = 0;
+ while (argv[argc]) argc++; /* count total number of arguments */
+ narg = argc - (n + 1); /* number of arguments to the script */
+ luaL_checkstack(L, narg + 3, "too many arguments to script");
+ for (i=n+1; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ lua_createtable(L, narg, n + 1);
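+  /* arg[0] is the script name; options before the script get negative
+     indices and the script's own arguments get positive indices */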
+ for (i=0; i < argc; i++) {
+ lua_pushstring(L, argv[i]);
+ lua_rawseti(L, -2, i - n);
+ }
+ return narg;
+}
+
+
+static int dofile (lua_State *L, const char *name) {
+ int status = luaL_loadfile(L, name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+
+static int dostring (lua_State *L, const char *s, const char *name) {
+ int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+
+static int dolibrary (lua_State *L, const char *name) {
+ lua_getglobal(L, "require");
+ lua_pushstring(L, name);
+ return report(L, lua_pcall(L, 1, 0, 0));
+}
+
+
+static const char *get_prompt (lua_State *L, int firstline) {
+ const char *p;
+ lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
+ p = lua_tostring(L, -1);
+ if (p == NULL) p = (firstline ? LUA_PROMPT : LUA_PROMPT2);
+ lua_pop(L, 1); /* remove global */
+ return p;
+}
+
+
+static int incomplete (lua_State *L, int status) {
+ if (status == LUA_ERRSYNTAX) {
+ size_t lmsg;
+ const char *msg = lua_tolstring(L, -1, &lmsg);
+    const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
+    if (strstr(msg, LUA_QL("<eof>")) == tp) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ }
+ return 0; /* else... */
+}
+
+
+static int pushline (lua_State *L, int firstline) {
+ char buffer[LUA_MAXINPUT];
+ char *b = buffer;
+ size_t l;
+ const char *prmt = get_prompt(L, firstline);
+ if (lua_readline(L, b, prmt) == 0)
+ return 0; /* no input */
+ l = strlen(b);
+ if (l > 0 && b[l-1] == '\n') /* line ends with newline? */
+ b[l-1] = '\0'; /* remove it */
+ if (firstline && b[0] == '=') /* first line starts with `=' ? */
+ lua_pushfstring(L, "return %s", b+1); /* change it to `return' */
+ else
+ lua_pushstring(L, b);
+ lua_freeline(L, b);
+ return 1;
+}
+
+
+static int loadline (lua_State *L) {
+ int status;
+ lua_settop(L, 0);
+ if (!pushline(L, 1))
+ return -1; /* no input */
+ for (;;) { /* repeat until gets a complete line */
+ status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
+ if (!incomplete(L, status)) break; /* cannot try to add lines? */
+ if (!pushline(L, 0)) /* no more input? */
+ return -1;
+ lua_pushliteral(L, "\n"); /* add a new line... */
+ lua_insert(L, -2); /* ...between the two lines */
+ lua_concat(L, 3); /* join them */
+ }
+ lua_saveline(L, 1);
+ lua_remove(L, 1); /* remove line */
+ return status;
+}
+
+
+static void dotty (lua_State *L) {
+ int status;
+ const char *oldprogname = progname;
+ progname = NULL;
+ while ((status = loadline(L)) != -1) {
+ if (status == 0) status = docall(L, 0, 0);
+ report(L, status);
+ if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */
+ lua_getglobal(L, "print");
+ lua_insert(L, 1);
+ if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
+ l_message(progname, lua_pushfstring(L,
+ "error calling " LUA_QL("print") " (%s)",
+ lua_tostring(L, -1)));
+ }
+ }
+ lua_settop(L, 0); /* clear stack */
+ fputs("\n", stdout);
+ fflush(stdout);
+ progname = oldprogname;
+}
+
+
+static int handle_script (lua_State *L, char **argv, int n) {
+ int status;
+ const char *fname;
+ int narg = getargs(L, argv, n); /* collect arguments */
+ lua_setglobal(L, "arg");
+ fname = argv[n];
+ if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
+ fname = NULL; /* stdin */
+ status = luaL_loadfile(L, fname);
+ lua_insert(L, -(narg+1));
+ if (status == 0)
+ status = docall(L, narg, 0);
+ else
+ lua_pop(L, narg);
+ return report(L, status);
+}
+
+
+static int collectargs (char **argv, int *pi, int *pv, int *pe) {
+ int i;
+ for (i = 1; argv[i] != NULL; i++) {
+ if (argv[i][0] != '-') /* not an option? */
+ return i;
+ switch (argv[i][1]) { /* option */
+ case '-': return (argv[i+1] != NULL ? i+1 : 0);
+ case '\0': return i;
+ case 'i': *pi = 1; /* go through */
+ case 'v': *pv = 1; break;
+ case 'e': *pe = 1; /* go through */
+ case 'l':
+ if (argv[i][2] == '\0') {
+ i++;
+ if (argv[i] == NULL) return -1;
+ }
+ break;
+ default: return -1; /* invalid option */
+ }
+ }
+ return 0;
+}
+
+
+static int runargs (lua_State *L, char **argv, int n) {
+ int i;
+ for (i = 1; i < n; i++) {
+ if (argv[i] == NULL) continue;
+ lua_assert(argv[i][0] == '-');
+ switch (argv[i][1]) { /* option */
+ case 'e': {
+ const char *chunk = argv[i] + 2;
+ if (*chunk == '\0') chunk = argv[++i];
+ lua_assert(chunk != NULL);
+ if (dostring(L, chunk, "=(command line)") != 0)
+ return 1;
+ break;
+ }
+ case 'l': {
+ const char *filename = argv[i] + 2;
+ if (*filename == '\0') filename = argv[++i];
+ lua_assert(filename != NULL);
+ if (dolibrary(L, filename))
+ return 1; /* stop if file fails */
+ break;
+ }
+ default: break;
+ }
+ }
+ return 0;
+}
+
+
+static int handle_luainit (lua_State *L) {
+ const char *init = getenv("LUA_INIT");
+ if (init == NULL) return 0; /* status OK */
+ else if (init[0] == '@')
+ return dofile(L, init+1);
+ else
+ return dostring(L, init, "=LUA_INIT");
+}
+
+
+struct Smain {
+ int argc;
+ char **argv;
+ int status;
+};
+
+
+static int pmain (lua_State *L) {
+ struct Smain *s = (struct Smain *)lua_touserdata(L, 1);
+ char **argv = s->argv;
+ int script;
+ int has_i = 0, has_v = 0, has_e = 0;
+ globalL = L;
+ if (argv[0] && argv[0][0]) progname = argv[0];
+ lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */
+ luaL_openlibs(L); /* open libraries */
+ lua_gc(L, LUA_GCRESTART, 0);
+ s->status = handle_luainit(L);
+ if (s->status != 0) return 0;
+ script = collectargs(argv, &has_i, &has_v, &has_e);
+ if (script < 0) { /* invalid args? */
+ print_usage();
+ s->status = 1;
+ return 0;
+ }
+ if (has_v) print_version();
+ s->status = runargs(L, argv, (script > 0) ? script : s->argc);
+ if (s->status != 0) return 0;
+ if (script)
+ s->status = handle_script(L, argv, script);
+ if (s->status != 0) return 0;
+ if (has_i)
+ dotty(L);
+ else if (script == 0 && !has_e && !has_v) {
+ if (lua_stdin_is_tty()) {
+ print_version();
+ dotty(L);
+ }
+ else dofile(L, NULL); /* executes stdin as a file */
+ }
+ return 0;
+}
+
+
+int main (int argc, char **argv) {
+ int status;
+ struct Smain s;
+ lua_State *L = lua_open(); /* create state */
+ if (L == NULL) {
+ l_message(argv[0], "cannot create state: not enough memory");
+ return EXIT_FAILURE;
+ }
+ s.argc = argc;
+ s.argv = argv;
+ status = lua_cpcall(L, &pmain, &s);
+ report(L, status);
+ lua_close(L);
+ return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/deps/lua/src/lua.h b/deps/lua/src/lua.h
new file mode 100644
index 0000000000000000000000000000000000000000..881f834555945d01b101984eb2eea22881258dee
--- /dev/null
+++ b/deps/lua/src/lua.h
@@ -0,0 +1,384 @@
+/*
+** $Id: lua.h,v 1.216 2006/01/10 12:50:13 roberto Exp $
+** Lua - An Extensible Extension Language
+** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
+** See Copyright Notice at the end of this file
+*/
+
+
+#ifndef lua_h
+#define lua_h
+
+#include <stdarg.h>
+#include <stddef.h>
+
+
+#include "luaconf.h"
+
+
+#define LUA_VERSION "Lua 5.1"
+#define LUA_VERSION_NUM 501
+#define LUA_COPYRIGHT "Copyright (C) 1994-2006 Lua.org, PUC-Rio"
+#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
+
+
+/* mark for precompiled code (`Lua') */
+#define LUA_SIGNATURE "\033Lua"
+
+/* option for multiple returns in `lua_pcall' and `lua_call' */
+#define LUA_MULTRET (-1)
+
+
+/*
+** pseudo-indices
+*/
+#define LUA_REGISTRYINDEX (-10000)
+#define LUA_ENVIRONINDEX (-10001)
+#define LUA_GLOBALSINDEX (-10002)
+#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
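+/* pseudo-indices refer to values that are accessible to C code but not
+** on the stack: the registry, the environment of the running C
+** function, the table of globals, and the upvalues of a C closure */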
+
+
+/* thread status; 0 is OK */
+#define LUA_YIELD 1
+#define LUA_ERRRUN 2
+#define LUA_ERRSYNTAX 3
+#define LUA_ERRMEM 4
+#define LUA_ERRERR 5
+
+
+typedef struct lua_State lua_State;
+
+typedef int (*lua_CFunction) (lua_State *L);
+
+
+/*
+** functions that read/write blocks when loading/dumping Lua chunks
+*/
+typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
+
+typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
+
+
+/*
+** prototype for memory-allocation functions
+*/
+typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
+
+
+/*
+** basic types
+*/
+#define LUA_TNONE (-1)
+
+#define LUA_TNIL 0
+#define LUA_TBOOLEAN 1
+#define LUA_TLIGHTUSERDATA 2
+#define LUA_TNUMBER 3
+#define LUA_TSTRING 4
+#define LUA_TTABLE 5
+#define LUA_TFUNCTION 6
+#define LUA_TUSERDATA 7
+#define LUA_TTHREAD 8
+
+
+
+/* minimum Lua stack available to a C function */
+#define LUA_MINSTACK 20
+
+
+/*
+** generic extra include file
+*/
+#if defined(LUA_USER_H)
+#include LUA_USER_H
+#endif
+
+
+/* type of numbers in Lua */
+typedef LUA_NUMBER lua_Number;
+
+
+/* type for integer functions */
+typedef LUA_INTEGER lua_Integer;
+
+
+
+/*
+** state manipulation
+*/
+LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
+LUA_API void (lua_close) (lua_State *L);
+LUA_API lua_State *(lua_newthread) (lua_State *L);
+
+LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
+
+
+/*
+** basic stack manipulation
+*/
+LUA_API int (lua_gettop) (lua_State *L);
+LUA_API void (lua_settop) (lua_State *L, int idx);
+LUA_API void (lua_pushvalue) (lua_State *L, int idx);
+LUA_API void (lua_remove) (lua_State *L, int idx);
+LUA_API void (lua_insert) (lua_State *L, int idx);
+LUA_API void (lua_replace) (lua_State *L, int idx);
+LUA_API int (lua_checkstack) (lua_State *L, int sz);
+
+LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
+
+
+/*
+** access functions (stack -> C)
+*/
+
+LUA_API int (lua_isnumber) (lua_State *L, int idx);
+LUA_API int (lua_isstring) (lua_State *L, int idx);
+LUA_API int (lua_iscfunction) (lua_State *L, int idx);
+LUA_API int (lua_isuserdata) (lua_State *L, int idx);
+LUA_API int (lua_type) (lua_State *L, int idx);
+LUA_API const char *(lua_typename) (lua_State *L, int tp);
+
+LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
+
+LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
+LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
+LUA_API int (lua_toboolean) (lua_State *L, int idx);
+LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
+LUA_API size_t (lua_objlen) (lua_State *L, int idx);
+LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
+LUA_API void *(lua_touserdata) (lua_State *L, int idx);
+LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
+LUA_API const void *(lua_topointer) (lua_State *L, int idx);
+
+
+/*
+** push functions (C -> stack)
+*/
+LUA_API void (lua_pushnil) (lua_State *L);
+LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
+LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
+LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
+LUA_API void (lua_pushstring) (lua_State *L, const char *s);
+LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
+ va_list argp);
+LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
+LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
+LUA_API void (lua_pushboolean) (lua_State *L, int b);
+LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
+LUA_API int (lua_pushthread) (lua_State *L);
+
+
+/*
+** get functions (Lua -> stack)
+*/
+LUA_API void (lua_gettable) (lua_State *L, int idx);
+LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawget) (lua_State *L, int idx);
+LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
+LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
+LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
+LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
+LUA_API void (lua_getfenv) (lua_State *L, int idx);
+
+
+/*
+** set functions (stack -> Lua)
+*/
+LUA_API void (lua_settable) (lua_State *L, int idx);
+LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawset) (lua_State *L, int idx);
+LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
+LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
+LUA_API int (lua_setfenv) (lua_State *L, int idx);
+
+
+/*
+** `load' and `call' functions (load and run Lua code)
+*/
+LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
+LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
+LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
+LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname);
+
+LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
+
+
+/*
+** coroutine functions
+*/
+LUA_API int (lua_yield) (lua_State *L, int nresults);
+LUA_API int (lua_resume) (lua_State *L, int narg);
+LUA_API int (lua_status) (lua_State *L);
+
+/*
+** garbage-collection function and options
+*/
+
+#define LUA_GCSTOP 0
+#define LUA_GCRESTART 1
+#define LUA_GCCOLLECT 2
+#define LUA_GCCOUNT 3
+#define LUA_GCCOUNTB 4
+#define LUA_GCSTEP 5
+#define LUA_GCSETPAUSE 6
+#define LUA_GCSETSTEPMUL 7
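+/* LUA_GCCOUNT reports the memory in use in Kbytes; LUA_GCCOUNTB
+** reports the remainder of that count in bytes */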
+
+LUA_API int (lua_gc) (lua_State *L, int what, int data);
+
+
+/*
+** miscellaneous functions
+*/
+
+LUA_API int (lua_error) (lua_State *L);
+
+LUA_API int (lua_next) (lua_State *L, int idx);
+
+LUA_API void (lua_concat) (lua_State *L, int n);
+
+LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
+LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define lua_pop(L,n) lua_settop(L, -(n)-1)
+
+#define lua_newtable(L) lua_createtable(L, 0, 0)
+
+#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
+
+#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
+
+#define lua_strlen(L,i) lua_objlen(L, (i))
+
+#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
+#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
+#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
+#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
+#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
+#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
+#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
+#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
+
+#define lua_pushliteral(L, s) \
+ lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
+
+#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
+#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
+
+#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
+
+
+
+/*
+** compatibility macros and functions
+*/
+
+#define lua_open() luaL_newstate()
+
+#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
+
+#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
+
+#define lua_Chunkreader lua_Reader
+#define lua_Chunkwriter lua_Writer
+
+
+
+/*
+** {======================================================================
+** Debug API
+** =======================================================================
+*/
+
+
+/*
+** Event codes
+*/
+#define LUA_HOOKCALL 0
+#define LUA_HOOKRET 1
+#define LUA_HOOKLINE 2
+#define LUA_HOOKCOUNT 3
+#define LUA_HOOKTAILRET 4
+
+
+/*
+** Event masks
+*/
+#define LUA_MASKCALL (1 << LUA_HOOKCALL)
+#define LUA_MASKRET (1 << LUA_HOOKRET)
+#define LUA_MASKLINE (1 << LUA_HOOKLINE)
+#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
+
+typedef struct lua_Debug lua_Debug; /* activation record */
+
+
+/* Functions to be called by the debugger in specific events */
+typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+
+
+LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+
+LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
+LUA_API lua_Hook lua_gethook (lua_State *L);
+LUA_API int lua_gethookmask (lua_State *L);
+LUA_API int lua_gethookcount (lua_State *L);
+
+
+struct lua_Debug {
+ int event;
+ const char *name; /* (n) */
+ const char *namewhat; /* (n) `global', `local', `field', `method' */
+ const char *what; /* (S) `Lua', `C', `main', `tail' */
+ const char *source; /* (S) */
+ int currentline; /* (l) */
+ int nups; /* (u) number of upvalues */
+ int linedefined; /* (S) */
+ int lastlinedefined; /* (S) */
+ char short_src[LUA_IDSIZE]; /* (S) */
+ /* private part */
+ int i_ci; /* active function */
+};
+
+/* }====================================================================== */
+
+
+/******************************************************************************
+* Copyright (C) 1994-2006 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+
+#endif
diff --git a/deps/lua/src/luac.c b/deps/lua/src/luac.c
new file mode 100644
index 0000000000000000000000000000000000000000..2dd76b76535f75e35271f465909307551afdf5be
--- /dev/null
+++ b/deps/lua/src/luac.c
@@ -0,0 +1,196 @@
+/*
+** $Id: luac.c,v 1.52 2005/11/11 14:03:13 lhf Exp $
+** Lua compiler (saves bytecodes to files; also list bytecodes)
+** See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define luac_c
+#define LUA_CORE
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "ldo.h"
+#include "lfunc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lstring.h"
+#include "lundump.h"
+
+#define PROGNAME "luac" /* default program name */
+#define OUTPUT PROGNAME ".out" /* default output file */
+
+static int listing=0; /* list bytecodes? */
+static int dumping=1; /* dump bytecodes? */
+static int stripping=0; /* strip debug information? */
+static char Output[]={ OUTPUT }; /* default output file name */
+static const char* output=Output; /* actual output file name */
+static const char* progname=PROGNAME; /* actual program name */
+
+static void fatal(const char* message)
+{
+ fprintf(stderr,"%s: %s\n",progname,message);
+ exit(EXIT_FAILURE);
+}
+
+static void cannot(const char* what)
+{
+ fprintf(stderr,"%s: cannot %s %s: %s\n",progname,what,output,strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+static void usage(const char* message)
+{
+ if (*message=='-')
+ fprintf(stderr,"%s: unrecognized option " LUA_QS "\n",progname,message);
+ else
+ fprintf(stderr,"%s: %s\n",progname,message);
+ fprintf(stderr,
+ "usage: %s [options] [filenames].\n"
+ "Available options are:\n"
+ " - process stdin\n"
+ " -l list\n"
+ " -o name output to file " LUA_QL("name") " (default is \"%s\")\n"
+ " -p parse only\n"
+ " -s strip debug information\n"
+ " -v show version information\n"
+ " -- stop handling options\n",
+ progname,Output);
+ exit(EXIT_FAILURE);
+}
+
+#define IS(s) (strcmp(argv[i],s)==0)
+
+static int doargs(int argc, char* argv[])
+{
+ int i;
+ if (argv[0]!=NULL && *argv[0]!=0) progname=argv[0];
+ for (i=1; i<argc; i++)
+ {
+  if (*argv[i]!='-')		/* end of options; keep it */
+   break;
+  else if (IS("--"))		/* end of options; skip it */
+  {
+   ++i;
+   break;
+  }
+  else if (IS("-"))		/* end of options; use stdin */
+   break;
+  else if (IS("-l"))		/* list */
+   ++listing;
+  else if (IS("-o"))		/* output file */
+  {
+   output=argv[++i];
+   if (output==NULL || *output==0) usage(LUA_QL("-o") " needs argument");
+  }
+  else if (IS("-p"))		/* parse only */
+   dumping=0;
+  else if (IS("-s"))		/* strip debug information */
+   stripping=1;
+  else if (IS("-v"))		/* show version */
+  {
+   printf("%s  %s\n",LUA_VERSION,LUA_COPYRIGHT);
+   if (argc==2) exit(EXIT_SUCCESS);
+  }
+  else				/* unknown option */
+   usage(argv[i]);
+ }
+ if (i==argc && (listing || !dumping))
+ {
+  dumping=0;
+  argv[--i]=Output;
+ }
+ return i;
+}
+
+#define toproto(L,i) (clvalue(L->top+(i))->l.p)
+
+static Proto* combine(lua_State* L, int n)
+{
+ if (n==1)
+ return toproto(L,-1);
+ else
+ {
+ int i,pc;
+ Proto* f=luaF_newproto(L);
+ setptvalue2s(L,L->top,f); incr_top(L);
+ f->source=luaS_newliteral(L,"=(" PROGNAME ")");
+ f->maxstacksize=1;
+ pc=2*n+1;
+ f->code=luaM_newvector(L,pc,Instruction);
+ f->sizecode=pc;
+ f->p=luaM_newvector(L,n,Proto*);
+ f->sizep=n;
+ pc=0;
+  for (i=0; i<n; i++)
+  {
+   f->p[i]=toproto(L,i-n-1);
+ f->code[pc++]=CREATE_ABx(OP_CLOSURE,0,i);
+ f->code[pc++]=CREATE_ABC(OP_CALL,0,1,1);
+ }
+ f->code[pc++]=CREATE_ABC(OP_RETURN,0,1,0);
+ return f;
+ }
+}
+
+static int writer(lua_State* L, const void* p, size_t size, void* u)
+{
+ UNUSED(L);
+ return (fwrite(p,size,1,(FILE*)u)!=1) && (size!=0);
+}
+
+struct Smain {
+ int argc;
+ char** argv;
+};
+
+static int pmain(lua_State* L)
+{
+ struct Smain* s = (struct Smain*)lua_touserdata(L, 1);
+ int argc=s->argc;
+ char** argv=s->argv;
+ Proto* f;
+ int i;
+ if (!lua_checkstack(L,argc)) fatal("too many input files");
+ for (i=0; i<argc; i++)
+ {
+  const char* filename=IS("-") ? NULL : argv[i];
+  if (luaL_loadfile(L,filename)!=0) fatal(lua_tostring(L,-1));
+ }
+ f=combine(L,argc);
+ if (listing) luaU_print(f,listing>1);
+ if (dumping)
+ {
+ FILE* D= (output==NULL) ? stdout : fopen(output,"wb");
+ if (D==NULL) cannot("open");
+ lua_lock(L);
+ luaU_dump(L,f,writer,D,stripping);
+ lua_unlock(L);
+ if (ferror(D)) cannot("write");
+ if (fclose(D)) cannot("close");
+ }
+ return 0;
+}
+
+int main(int argc, char* argv[])
+{
+ lua_State* L;
+ struct Smain s;
+ int i=doargs(argc,argv);
+ argc-=i; argv+=i;
+ if (argc<=0) usage("no input files given");
+ L=lua_open();
+ if (L==NULL) fatal("not enough memory for state");
+ s.argc=argc;
+ s.argv=argv;
+ if (lua_cpcall(L,pmain,&s)!=0) fatal(lua_tostring(L,-1));
+ lua_close(L);
+ return EXIT_SUCCESS;
+}
diff --git a/deps/lua/src/luaconf.h b/deps/lua/src/luaconf.h
new file mode 100644
index 0000000000000000000000000000000000000000..97a3e30c0eb1cf207125434c1bbda5966f0d209f
--- /dev/null
+++ b/deps/lua/src/luaconf.h
@@ -0,0 +1,736 @@
+/*
+** $Id: luaconf.h,v 1.81 2006/02/10 17:44:06 roberto Exp $
+** Configuration file for Lua
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lconfig_h
+#define lconfig_h
+
+#include <limits.h>
+#include <stddef.h>
+
+
+/*
+** ==================================================================
+** Search for "@@" to find all configurable definitions.
+** ===================================================================
+*/
+
+
+/*
+@@ LUA_ANSI controls the use of non-ansi features.
+** CHANGE it (define it) if you want Lua to avoid the use of any
+** non-ansi feature or library.
+*/
+#if defined(__STRICT_ANSI__)
+#define LUA_ANSI
+#endif
+
+
+#if !defined(LUA_ANSI) && defined(_WIN32)
+#define LUA_WIN
+#endif
+
+#if defined(LUA_USE_LINUX)
+#define LUA_USE_POSIX
+#define LUA_USE_DLOPEN /* needs an extra library: -ldl */
+#define LUA_USE_READLINE /* needs some extra libraries */
+#endif
+
+#if defined(LUA_USE_MACOSX)
+#define LUA_USE_POSIX
+#define LUA_DL_DYLD /* does not need extra library */
+#endif
+
+
+
+/*
+@@ LUA_USE_POSIX includes all functionality listed as X/Open System
+@* Interfaces Extension (XSI).
+** CHANGE it (define it) if your system is XSI compatible.
+*/
+#if defined(LUA_USE_POSIX)
+#define LUA_USE_MKSTEMP
+#define LUA_USE_ISATTY
+#define LUA_USE_POPEN
+#define LUA_USE_ULONGJMP
+#endif
+
+
+/*
+@@ LUA_PATH_DEFAULT is the default path that Lua uses to look for
+@* Lua libraries.
+@@ LUA_CPATH_DEFAULT is the default path that Lua uses to look for
+@* C libraries.
+** CHANGE them if your machine has a non-conventional directory
+** hierarchy or if you want to install your libraries in
+** non-conventional directories.
+*/
+#if defined(_WIN32)
+/*
+** In Windows, any exclamation mark ('!') in the path is replaced by the
+** path of the directory of the executable file of the current process.
+*/
+#define LUA_LDIR "!\\lua\\"
+#define LUA_CDIR "!\\"
+#define LUA_PATH_DEFAULT \
+ ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" \
+ LUA_CDIR"?.lua;" LUA_CDIR"?\\init.lua"
+#define LUA_CPATH_DEFAULT \
+ ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
+
+#else
+#define LUA_ROOT "/usr/local/"
+#define LUA_LDIR LUA_ROOT "share/lua/5.1/"
+#define LUA_CDIR LUA_ROOT "lib/lua/5.1/"
+#define LUA_PATH_DEFAULT \
+ "./?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua;" \
+ LUA_CDIR"?.lua;" LUA_CDIR"?/init.lua"
+#define LUA_CPATH_DEFAULT \
+ "./?.so;" LUA_CDIR"?.so;" LUA_CDIR"loadall.so"
+#endif
+
+
+/*
+@@ LUA_DIRSEP is the directory separator (for submodules).
+** CHANGE it if your machine does not use "/" as the directory separator
+** and is not Windows. (On Windows Lua automatically uses "\".)
+*/
+#if defined(_WIN32)
+#define LUA_DIRSEP "\\"
+#else
+#define LUA_DIRSEP "/"
+#endif
+
+
+/*
+@@ LUA_PATHSEP is the character that separates templates in a path.
+@@ LUA_PATH_MARK is the string that marks the substitution points in a
+@* template.
+@@ LUA_EXECDIR in a Windows path is replaced by the executable's
+@* directory.
+@@ LUA_IGMARK is a mark to ignore all before it when building the
+@* luaopen_ function name.
+** CHANGE them if for some reason your system cannot use those
+** characters. (E.g., if one of those characters is a common character
+** in file/directory names.) Probably you do not need to change them.
+*/
+#define LUA_PATHSEP ";"
+#define LUA_PATH_MARK "?"
+#define LUA_EXECDIR "!"
+#define LUA_IGMARK "-"
+
+
+/*
+@@ LUA_INTEGER is the integral type used by lua_pushinteger/lua_tointeger.
+** CHANGE that if ptrdiff_t is not adequate on your machine. (On most
+** machines, ptrdiff_t gives a good choice between int or long.)
+*/
+#define LUA_INTEGER ptrdiff_t
+
+
+/*
+@@ LUA_API is a mark for all core API functions.
+@@ LUALIB_API is a mark for all standard library functions.
+** CHANGE them if you need to define those functions in some special way.
+** For instance, if you want to create one Windows DLL with the core and
+** the libraries, you may want to use the following definition (define
+** LUA_BUILD_AS_DLL to get it).
+*/
+#if defined(LUA_BUILD_AS_DLL)
+
+#if defined(LUA_CORE) || defined(LUA_LIB)
+#define LUA_API __declspec(dllexport)
+#else
+#define LUA_API __declspec(dllimport)
+#endif
+
+#else
+
+#define LUA_API extern
+
+#endif
+
+/* more often than not the libs go together with the core */
+#define LUALIB_API LUA_API
+
+
+/*
+@@ LUAI_FUNC is a mark for all extern functions that are not to be
+@* exported to outside modules.
+@@ LUAI_DATA is a mark for all extern (const) variables that are not to
+@* be exported to outside modules.
+** CHANGE them if you need to mark them in some special way. Elf/gcc
+** (versions 3.2 and later) mark them as "hidden" to optimize access
+** when Lua is compiled as a shared library.
+*/
+#if defined(luaall_c)
+#define LUAI_FUNC static
+#define LUAI_DATA /* empty */
+
+#elif defined(__GNUC__) && ((__GNUC__*100 + __GNUC_MINOR__) >= 302) && \
+ defined(__ELF__)
+#define LUAI_FUNC __attribute__((visibility("hidden"))) extern
+#define LUAI_DATA LUAI_FUNC
+
+#else
+#define LUAI_FUNC extern
+#define LUAI_DATA extern
+#endif
+
+
+
+/*
+@@ LUA_QL describes how error messages quote program elements.
+** CHANGE it if you want a different appearance.
+*/
+#define LUA_QL(x) "'" x "'"
+#define LUA_QS LUA_QL("%s")
+
+
+/*
+@@ LUA_IDSIZE gives the maximum size for the description of the source
+@* of a function in debug information.
+** CHANGE it if you want a different size.
+*/
+#define LUA_IDSIZE 60
+
+
+/*
+** {==================================================================
+** Stand-alone configuration
+** ===================================================================
+*/
+
+#if defined(lua_c) || defined(luaall_c)
+
+/*
+@@ lua_stdin_is_tty detects whether the standard input is a 'tty' (that
+@* is, whether we're running lua interactively).
+** CHANGE it if you have a better definition for non-POSIX/non-Windows
+** systems.
+*/
+#if defined(LUA_USE_ISATTY)
+#include <unistd.h>
+#define lua_stdin_is_tty() isatty(0)
+#elif defined(LUA_WIN)
+#include <io.h>
+#include <stdio.h>
+#define lua_stdin_is_tty() _isatty(_fileno(stdin))
+#else
+#define lua_stdin_is_tty() 1 /* assume stdin is a tty */
+#endif
+
+
+/*
+@@ LUA_PROMPT is the default prompt used by stand-alone Lua.
+@@ LUA_PROMPT2 is the default continuation prompt used by stand-alone Lua.
+** CHANGE them if you want different prompts. (You can also change the
+** prompts dynamically, assigning to globals _PROMPT/_PROMPT2.)
+*/
+#define LUA_PROMPT "> "
+#define LUA_PROMPT2 ">> "
+
+
+/*
+@@ LUA_PROGNAME is the default name for the stand-alone Lua program.
+** CHANGE it if your stand-alone interpreter has a different name and
+** your system is not able to detect that name automatically.
+*/
+#define LUA_PROGNAME "lua"
+
+
+/*
+@@ LUA_MAXINPUT is the maximum length for an input line in the
+@* stand-alone interpreter.
+** CHANGE it if you need longer lines.
+*/
+#define LUA_MAXINPUT 512
+
+
+/*
+@@ lua_readline defines how to show a prompt and then read a line from
+@* the standard input.
+@@ lua_saveline defines how to "save" a read line in a "history".
+@@ lua_freeline defines how to free a line read by lua_readline.
+** CHANGE them if you want to improve this functionality (e.g., by using
+** GNU readline and history facilities).
+*/
+#if defined(LUA_USE_READLINE)
+#include <stdio.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#define lua_readline(L,b,p) ((void)L, ((b)=readline(p)) != NULL)
+#define lua_saveline(L,idx) \
+ if (lua_strlen(L,idx) > 0) /* non-empty line? */ \
+ add_history(lua_tostring(L, idx)); /* add it to history */
+#define lua_freeline(L,b) ((void)L, free(b))
+#else
+#define lua_readline(L,b,p) \
+ ((void)L, fputs(p, stdout), fflush(stdout), /* show prompt */ \
+ fgets(b, LUA_MAXINPUT, stdin) != NULL) /* get line */
+#define lua_saveline(L,idx) { (void)L; (void)idx; }
+#define lua_freeline(L,b) { (void)L; (void)b; }
+#endif
+
+#endif
+
+/* }================================================================== */
+
+
+/*
+@@ LUAI_GCPAUSE defines the default pause between garbage-collector cycles
+@* as a percentage.
+** CHANGE it if you want the GC to run faster or slower (higher values
+** mean larger pauses which mean slower collection.) You can also change
+** this value dynamically.
+*/
+#define LUAI_GCPAUSE 200 /* 200% (wait memory to double before next GC) */
+
+
+/*
+@@ LUAI_GCMUL defines the default speed of garbage collection relative to
+@* memory allocation as a percentage.
+** CHANGE it if you want to change the granularity of the garbage
+** collection. (Higher values mean coarser collections. 0 represents
+** infinity, where each step performs a full collection.) You can also
+** change this value dynamically.
+*/
+#define LUAI_GCMUL 200 /* GC runs 'twice the speed' of memory allocation */
+
+
+
+/*
+@@ LUA_COMPAT_GETN controls compatibility with old getn behavior.
+** CHANGE it (define it) if you want exact compatibility with the
+** behavior of setn/getn in Lua 5.0.
+*/
+#undef LUA_COMPAT_GETN
+
+/*
+@@ LUA_COMPAT_LOADLIB controls compatibility about global loadlib.
+** CHANGE it to undefined as soon as you do not need a global 'loadlib'
+** function (the function is still available as 'package.loadlib').
+*/
+#undef LUA_COMPAT_LOADLIB
+
+/*
+@@ LUA_COMPAT_VARARG controls compatibility with old vararg feature.
+** CHANGE it to undefined as soon as your programs use only '...' to
+** access vararg parameters (instead of the old 'arg' table).
+*/
+#define LUA_COMPAT_VARARG
+
+/*
+@@ LUA_COMPAT_MOD controls compatibility with old math.mod function.
+** CHANGE it to undefined as soon as your programs use 'math.fmod' or
+** the new '%' operator instead of 'math.mod'.
+*/
+#define LUA_COMPAT_MOD
+
+/*
+@@ LUA_COMPAT_LSTR controls compatibility with old long string nesting
+@* facility.
+** CHANGE it to 2 if you want the old behaviour, or undefine it to turn
+** off the advisory error when nesting [[...]].
+*/
+#define LUA_COMPAT_LSTR 1
+
+/*
+@@ LUA_COMPAT_GFIND controls compatibility with old 'string.gfind' name.
+** CHANGE it to undefined as soon as you rename 'string.gfind' to
+** 'string.gmatch'.
+*/
+#define LUA_COMPAT_GFIND
+
+/*
+@@ LUA_COMPAT_OPENLIB controls compatibility with old 'luaL_openlib'
+@* behavior.
+** CHANGE it to undefined as soon as you replace your uses of
+** 'luaL_openlib' with 'luaL_register'.
+*/
+#define LUA_COMPAT_OPENLIB
+
+
+
+/*
+@@ luai_apicheck is the assert macro used by the Lua-C API.
+** CHANGE luai_apicheck if you want Lua to perform some checks in the
+** parameters it gets from API calls. This may slow down the interpreter
+** a bit, but may be quite useful when debugging C code that interfaces
+** with Lua. A useful redefinition is to use assert.h.
+*/
+#if defined(LUA_USE_APICHECK)
+#include <assert.h>
+#define luai_apicheck(L,o) { (void)L; assert(o); }
+#else
+#define luai_apicheck(L,o) { (void)L; }
+#endif
+
+
+/*
+@@ LUAI_BITSINT defines the number of bits in an int.
+** CHANGE here if Lua cannot automatically detect the number of bits of
+** your machine. Probably you do not need to change this.
+*/
+/* avoid overflows in comparison */
+#if INT_MAX-20 < 32760
+#define LUAI_BITSINT 16
+#elif INT_MAX > 2147483640L
+/* int has at least 32 bits */
+#define LUAI_BITSINT 32
+#else
+#error "you must define LUA_BITSINT with number of bits in an integer"
+#endif
+
+
+/*
+@@ LUAI_UINT32 is an unsigned integer with at least 32 bits.
+@@ LUAI_INT32 is a signed integer with at least 32 bits.
+@@ LUAI_UMEM is an unsigned integer big enough to count the total
+@* memory used by Lua.
+@@ LUAI_MEM is a signed integer big enough to count the total memory
+@* used by Lua.
+** CHANGE here if for some weird reason the default definitions are not
+** good enough for your machine. (The definitions in the 'else'
+** part always work, but may waste space on machines with 64-bit
+** longs.) Probably you do not need to change this.
+*/
+#if LUAI_BITSINT >= 32
+#define LUAI_UINT32 unsigned int
+#define LUAI_INT32 int
+#define LUAI_MAXINT32 INT_MAX
+#define LUAI_UMEM size_t
+#define LUAI_MEM ptrdiff_t
+#else
+/* 16-bit ints */
+#define LUAI_UINT32 unsigned long
+#define LUAI_INT32 long
+#define LUAI_MAXINT32 LONG_MAX
+#define LUAI_UMEM unsigned long
+#define LUAI_MEM long
+#endif
+
+
+/*
+@@ LUAI_MAXCALLS limits the number of nested calls.
+** CHANGE it if you need really deep recursive calls. This limit is
+** arbitrary; its only purpose is to stop infinite recursion before
+** exhausting memory.
+*/
+#define LUAI_MAXCALLS 20000
+
+
+/*
+@@ LUAI_MAXCSTACK limits the number of Lua stack slots that a C function
+@* can use.
+** CHANGE it if you need lots of (Lua) stack space for your C
+** functions. This limit is arbitrary; its only purpose is to stop C
+** functions from consuming unlimited stack space.
+*/
+#define LUAI_MAXCSTACK 2048
+
+
+
+/*
+** {==================================================================
+** CHANGE (to smaller values) the following definitions if your system
+** has a small C stack. (Or you may want to change them to larger
+** values if your system has a large C stack and these limits are
+** too rigid for you.) Some of these constants control the size of
+** stack-allocated arrays used by the compiler or the interpreter, while
+** others limit the maximum number of recursive calls that the compiler
+** or the interpreter can perform. Values too large may cause a C stack
+** overflow for some forms of deep constructs.
+** ===================================================================
+*/
+
+
+/*
+@@ LUAI_MAXCCALLS is the maximum depth for nested C calls (short) and
+@* syntactical nested non-terminals in a program.
+*/
+#define LUAI_MAXCCALLS 200
+
+
+/*
+@@ LUAI_MAXVARS is the maximum number of local variables per function
+@* (must be smaller than 250).
+*/
+#define LUAI_MAXVARS 200
+
+
+/*
+@@ LUAI_MAXUPVALUES is the maximum number of upvalues per function
+@* (must be smaller than 250).
+*/
+#define LUAI_MAXUPVALUES 60
+
+
+/*
+@@ LUAL_BUFFERSIZE is the buffer size used by the lauxlib buffer system.
+*/
+#define LUAL_BUFFERSIZE BUFSIZ
+
+/* }================================================================== */
+
+
+
+
+/*
+** {==================================================================
+@@ LUA_NUMBER is the type of numbers in Lua.
+** CHANGE the following definitions only if you want to build Lua
+** with a number type different from double. You may also need to
+** change lua_number2int & lua_number2integer.
+** ===================================================================
+*/
+
+#define LUA_NUMBER_DOUBLE
+#define LUA_NUMBER double
+
+/*
+@@ LUAI_UACNUMBER is the result of an 'usual argument conversion'
+@* over a number.
+*/
+#define LUAI_UACNUMBER double
+
+
+/*
+@@ LUA_NUMBER_SCAN is the format for reading numbers.
+@@ LUA_NUMBER_FMT is the format for writing numbers.
+@@ lua_number2str converts a number to a string.
+@@ LUAI_MAXNUMBER2STR is maximum size of previous conversion.
+@@ lua_str2number converts a string to a number.
+*/
+#define LUA_NUMBER_SCAN "%lf"
+#define LUA_NUMBER_FMT "%.14g"
+#define lua_number2str(s,n) sprintf((s), LUA_NUMBER_FMT, (n))
+#define LUAI_MAXNUMBER2STR 32 /* 16 digits, sign, point, and \0 */
+#define lua_str2number(s,p) strtod((s), (p))
+
+
+/*
+@@ The luai_num* macros define the primitive operations over numbers.
+*/
+#if defined(LUA_CORE)
+#include <math.h>
+#define luai_numadd(a,b) ((a)+(b))
+#define luai_numsub(a,b) ((a)-(b))
+#define luai_nummul(a,b) ((a)*(b))
+#define luai_numdiv(a,b) ((a)/(b))
+#define luai_nummod(a,b) ((a) - floor((a)/(b))*(b))
+#define luai_numpow(a,b) (pow(a,b))
+#define luai_numunm(a) (-(a))
+#define luai_numeq(a,b) ((a)==(b))
+#define luai_numlt(a,b) ((a)<(b))
+#define luai_numle(a,b) ((a)<=(b))
+#define luai_numisnan(a) (!luai_numeq((a), (a)))
+#endif
+
+
+/*
+@@ lua_number2int is a macro to convert lua_Number to int.
+@@ lua_number2integer is a macro to convert lua_Number to lua_Integer.
+** CHANGE them if you know a faster way to convert a lua_Number to
+** int (with any rounding method and without throwing errors) in your
+** system. In Pentium machines, a naive typecast from double to int
+** in C is extremely slow, so any alternative is worth trying.
+*/
+
+/* On a Pentium, resort to a trick */
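+/* 6755399441055744.0 is 2^52 + 2^51; adding it leaves the rounded
+** integer value in the low bits of the double's mantissa, where the
+** l_l member of the union below reads it directly */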
+#if defined(LUA_NUMBER_DOUBLE) && !defined(LUA_ANSI) && !defined(__SSE2__) && \
+ (defined(__i386) || defined (_M_IX86) || defined(__i386__))
+union luai_Cast { double l_d; long l_l; };
+#define lua_number2int(i,d) \
+ { volatile union luai_Cast u; u.l_d = (d) + 6755399441055744.0; (i) = u.l_l; }
+#define lua_number2integer(i,n) lua_number2int(i, n)
+
+/* this option always works, but may be slow */
+#else
+#define lua_number2int(i,d) ((i)=(int)(d))
+#define lua_number2integer(i,d) ((i)=(lua_Integer)(d))
+
+#endif
+
+/* }================================================================== */
+
+
+/*
+@@ LUAI_USER_ALIGNMENT_T is a type that requires maximum alignment.
+** CHANGE it if your system requires alignments larger than double. (For
+** instance, if your system supports long doubles and they must be
+** aligned in 16-byte boundaries, then you should add long double in the
+** union.) Probably you do not need to change this.
+*/
+#define LUAI_USER_ALIGNMENT_T union { double u; void *s; long l; }
+
+
+/*
+@@ LUAI_THROW/LUAI_TRY define how Lua does exception handling.
+** CHANGE them if you prefer to use longjmp/setjmp even with C++
+** or if you do or do not want to use _longjmp/_setjmp instead of regular
+** longjmp/setjmp. By default, Lua handles errors with exceptions when
+** compiling as C++ code, with _longjmp/_setjmp when asked to use them,
+** and with longjmp/setjmp otherwise.
+*/
+#if defined(__cplusplus)
+/* C++ exceptions */
+#define LUAI_THROW(L,c) throw(c)
+#define LUAI_TRY(L,c,a) try { a } catch(...) \
+ { if ((c)->status == 0) (c)->status = -1; }
+#define luai_jmpbuf int /* dummy variable */
+
+#elif defined(LUA_USE_ULONGJMP)
+/* in Unix, try _longjmp/_setjmp (more efficient) */
+#define LUAI_THROW(L,c) _longjmp((c)->b, 1)
+#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a }
+#define luai_jmpbuf jmp_buf
+
+#else
+/* default handling with long jumps */
+#define LUAI_THROW(L,c) longjmp((c)->b, 1)
+#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a }
+#define luai_jmpbuf jmp_buf
+
+#endif
+
+
+/*
+@@ LUA_MAXCAPTURES is the maximum number of captures that a pattern
+@* can do during pattern-matching.
+** CHANGE it if you need more captures. This limit is arbitrary.
+*/
+#define LUA_MAXCAPTURES 32
+
+
+/*
+@@ lua_tmpnam is the function that the OS library uses to create a
+@* temporary name.
+@@ LUA_TMPNAMBUFSIZE is the maximum size of a name created by lua_tmpnam.
+** CHANGE them if you have an alternative to tmpnam (which is considered
+** insecure) or if you want the original tmpnam anyway. By default, Lua
+** uses tmpnam except when POSIX is available, where it uses mkstemp.
+*/
+#if defined(loslib_c) || defined(luaall_c)
+
+#if defined(LUA_USE_MKSTEMP)
+#include <unistd.h>
+#define LUA_TMPNAMBUFSIZE 32
+#define lua_tmpnam(b,e) { \
+ strcpy(b, "/tmp/lua_XXXXXX"); \
+ e = mkstemp(b); \
+ if (e != -1) close(e); \
+ e = (e == -1); }
+
+#else
+#define LUA_TMPNAMBUFSIZE L_tmpnam
+#define lua_tmpnam(b,e) { e = (tmpnam(b) == NULL); }
+#endif
+
+#endif
+
+
+/*
+@@ lua_popen spawns a new process connected to the current one through
+@* the file streams.
+** CHANGE it if you have a way to implement it in your system.
+*/
+#if defined(LUA_USE_POPEN)
+
+#define lua_popen(L,c,m) ((void)L, popen(c,m))
+#define lua_pclose(L,file) ((void)L, (pclose(file) != -1))
+
+#elif defined(LUA_WIN)
+
+#define lua_popen(L,c,m) ((void)L, _popen(c,m))
+#define lua_pclose(L,file) ((void)L, (_pclose(file) != -1))
+
+#else
+
+#define lua_popen(L,c,m) ((void)((void)c, m), \
+ luaL_error(L, LUA_QL("popen") " not supported"), (FILE*)0)
+#define lua_pclose(L,file) ((void)((void)L, file), 0)
+
+#endif
+
+/*
+@@ LUA_DL_* define which dynamic-library system Lua should use.
+** CHANGE here if Lua has problems choosing the appropriate
+** dynamic-library system for your platform (either Windows' DLL, Mac's
+** dyld, or Unix's dlopen). If your system is some kind of Unix, there
+** is a good chance that it has dlopen, so LUA_DL_DLOPEN will work for
+** it. To use dlopen you also need to adapt the src/Makefile (probably
+** adding -ldl to the linker options), so Lua does not select it
+** automatically. (When you change the makefile to add -ldl, you must
+** also add -DLUA_USE_DLOPEN.)
+** If you do not want any kind of dynamic library, undefine all these
+** options.
+** By default, _WIN32 gets LUA_DL_DLL and MAC OS X gets LUA_DL_DYLD.
+*/
+#if defined(LUA_USE_DLOPEN)
+#define LUA_DL_DLOPEN
+#endif
+
+#if defined(LUA_WIN)
+#define LUA_DL_DLL
+#endif
+
+
+/*
+@@ LUAI_EXTRASPACE allows you to add user-specific data in a lua_State
+@* (the data goes just *before* the lua_State pointer).
+** CHANGE (define) this if you really need that. This value must be
+** a multiple of the maximum alignment required for your machine.
+*/
+#define LUAI_EXTRASPACE 0
+
+
+/*
+@@ luai_userstate* allow user-specific actions on threads.
+** CHANGE them if you defined LUAI_EXTRASPACE and need to do something
+** extra when a thread is created/deleted/resumed/yielded.
+*/
+#define luai_userstateopen(L) ((void)L)
+#define luai_userstateclose(L) ((void)L)
+#define luai_userstatethread(L,L1) ((void)L)
+#define luai_userstatefree(L) ((void)L)
+#define luai_userstateresume(L,n) ((void)L)
+#define luai_userstateyield(L,n) ((void)L)
+
+
+/*
+@@ LUA_INTFRMLEN is the length modifier for integer conversions
+@* in 'string.format'.
+@@ LUA_INTFRM_T is the integer type corresponding to the previous length
+@* modifier.
+** CHANGE them if your system supports long long or does not support long.
+*/
+
+#if defined(LUA_USELONGLONG)
+
+#define LUA_INTFRMLEN "ll"
+#define LUA_INTFRM_T long long
+
+#else
+
+#define LUA_INTFRMLEN "l"
+#define LUA_INTFRM_T long
+
+#endif
+
+
+
+/* =================================================================== */
+
+/*
+** Local configuration. You can use this space to add your redefinitions
+** without modifying the main part of the file.
+*/
+
+
+
+#endif
+
diff --git a/deps/lua/src/lualib.h b/deps/lua/src/lualib.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c76232c0dd03737542483119723ebc2ae779c57
--- /dev/null
+++ b/deps/lua/src/lualib.h
@@ -0,0 +1,53 @@
+/*
+** $Id: lualib.h,v 1.36 2005/12/27 17:12:00 roberto Exp $
+** Lua standard libraries
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lualib_h
+#define lualib_h
+
+#include "lua.h"
+
+
+/* Key to file-handle type */
+#define LUA_FILEHANDLE "FILE*"
+
+
+#define LUA_COLIBNAME "coroutine"
+LUALIB_API int (luaopen_base) (lua_State *L);
+
+#define LUA_TABLIBNAME "table"
+LUALIB_API int (luaopen_table) (lua_State *L);
+
+#define LUA_IOLIBNAME "io"
+LUALIB_API int (luaopen_io) (lua_State *L);
+
+#define LUA_OSLIBNAME "os"
+LUALIB_API int (luaopen_os) (lua_State *L);
+
+#define LUA_STRLIBNAME "string"
+LUALIB_API int (luaopen_string) (lua_State *L);
+
+#define LUA_MATHLIBNAME "math"
+LUALIB_API int (luaopen_math) (lua_State *L);
+
+#define LUA_DBLIBNAME "debug"
+LUALIB_API int (luaopen_debug) (lua_State *L);
+
+#define LUA_LOADLIBNAME "package"
+LUALIB_API int (luaopen_package) (lua_State *L);
+
+
+/* open all previous libraries */
+LUALIB_API void (luaL_openlibs) (lua_State *L);
+
+
+
+#ifndef lua_assert
+#define lua_assert(x) ((void)0)
+#endif
+
+
+#endif
diff --git a/deps/lua/src/lundump.c b/deps/lua/src/lundump.c
new file mode 100644
index 0000000000000000000000000000000000000000..7fc635eeb7bebb9ba08ca0151ce24bf0c0acb910
--- /dev/null
+++ b/deps/lua/src/lundump.c
@@ -0,0 +1,223 @@
+/*
+** $Id: lundump.c,v 1.60 2006/02/16 15:53:49 lhf Exp $
+** load precompiled Lua chunks
+** See Copyright Notice in lua.h
+*/
+
+#include <string.h>
+
+#define lundump_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lmem.h"
+#include "lobject.h"
+#include "lstring.h"
+#include "lundump.h"
+#include "lzio.h"
+
+typedef struct {
+ lua_State* L;
+ ZIO* Z;
+ Mbuffer* b;
+ const char* name;
+} LoadState;
+
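+/* when LUAC_TRUST_BINARIES is defined, the IF() consistency checks on
+** precompiled chunks are compiled out entirely */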
+#ifdef LUAC_TRUST_BINARIES
+#define IF(c,s)
+#else
+#define IF(c,s) if (c) error(S,s)
+
+static void error(LoadState* S, const char* why)
+{
+ luaO_pushfstring(S->L,"%s: %s in precompiled chunk",S->name,why);
+ luaD_throw(S->L,LUA_ERRSYNTAX);
+}
+#endif
+
+#define LoadMem(S,b,n,size) LoadBlock(S,b,(n)*(size))
+#define LoadByte(S) (lu_byte)LoadChar(S)
+#define LoadVar(S,x) LoadMem(S,&x,1,sizeof(x))
+#define LoadVector(S,b,n,size) LoadMem(S,b,n,size)
+
+static void LoadBlock(LoadState* S, void* b, size_t size)
+{
+ size_t r=luaZ_read(S->Z,b,size);
+ IF (r!=0, "unexpected end");
+}
+
+static int LoadChar(LoadState* S)
+{
+ char x;
+ LoadVar(S,x);
+ return x;
+}
+
+static int LoadInt(LoadState* S)
+{
+ int x;
+ LoadVar(S,x);
+ IF (x<0, "bad integer");
+ return x;
+}
+
+static lua_Number LoadNumber(LoadState* S)
+{
+ lua_Number x;
+ LoadVar(S,x);
+ return x;
+}
+
+static TString* LoadString(LoadState* S)
+{
+ size_t size;
+ LoadVar(S,size);
+ if (size==0)
+ return NULL;
+ else
+ {
+ char* s=luaZ_openspace(S->L,S->b,size);
+ LoadBlock(S,s,size);
+ return luaS_newlstr(S->L,s,size-1); /* remove trailing '\0' */
+ }
+}
+
+static void LoadCode(LoadState* S, Proto* f)
+{
+ int n=LoadInt(S);
+ f->code=luaM_newvector(S->L,n,Instruction);
+ f->sizecode=n;
+ LoadVector(S,f->code,n,sizeof(Instruction));
+}
+
+static Proto* LoadFunction(LoadState* S, TString* p);
+
+static void LoadConstants(LoadState* S, Proto* f)
+{
+ int i,n;
+ n=LoadInt(S);
+ f->k=luaM_newvector(S->L,n,TValue);
+ f->sizek=n;
+ for (i=0; i<n; i++) setnilvalue(&f->k[i]);
+ for (i=0; i<n; i++)
+ {
+  TValue* o=&f->k[i];
+ int t=LoadChar(S);
+ switch (t)
+ {
+ case LUA_TNIL:
+ setnilvalue(o);
+ break;
+ case LUA_TBOOLEAN:
+ setbvalue(o,LoadChar(S));
+ break;
+ case LUA_TNUMBER:
+ setnvalue(o,LoadNumber(S));
+ break;
+ case LUA_TSTRING:
+ setsvalue2n(S->L,o,LoadString(S));
+ break;
+ default:
+ IF (1, "bad constant");
+ break;
+ }
+ }
+ n=LoadInt(S);
+ f->p=luaM_newvector(S->L,n,Proto*);
+ f->sizep=n;
+ for (i=0; i<n; i++) f->p[i]=NULL;
+ for (i=0; i<n; i++) f->p[i]=LoadFunction(S,f->source);
+}
+
+static void LoadDebug(LoadState* S, Proto* f)
+{
+ int i,n;
+ n=LoadInt(S);
+ f->lineinfo=luaM_newvector(S->L,n,int);
+ f->sizelineinfo=n;
+ LoadVector(S,f->lineinfo,n,sizeof(int));
+ n=LoadInt(S);
+ f->locvars=luaM_newvector(S->L,n,LocVar);
+ f->sizelocvars=n;
+ for (i=0; i<n; i++) f->locvars[i].varname=NULL;
+ for (i=0; i<n; i++)
+ {
+  f->locvars[i].varname=LoadString(S);
+ f->locvars[i].startpc=LoadInt(S);
+ f->locvars[i].endpc=LoadInt(S);
+ }
+ n=LoadInt(S);
+ f->upvalues=luaM_newvector(S->L,n,TString*);
+ f->sizeupvalues=n;
+ for (i=0; i<n; i++) f->upvalues[i]=NULL;
+ for (i=0; i<n; i++) f->upvalues[i]=LoadString(S);
+}
+
+static Proto* LoadFunction(LoadState* S, TString* p)
+{
+ Proto* f=luaF_newproto(S->L);
+ setptvalue2s(S->L,S->L->top,f); incr_top(S->L);
+ f->source=LoadString(S); if (f->source==NULL) f->source=p;
+ f->linedefined=LoadInt(S);
+ f->lastlinedefined=LoadInt(S);
+ f->nups=LoadByte(S);
+ f->numparams=LoadByte(S);
+ f->is_vararg=LoadByte(S);
+ f->maxstacksize=LoadByte(S);
+ LoadCode(S,f);
+ LoadConstants(S,f);
+ LoadDebug(S,f);
+ IF (!luaG_checkcode(f), "bad code");
+ S->L->top--;
+ return f;
+}
+
+static void LoadHeader(LoadState* S)
+{
+ char h[LUAC_HEADERSIZE];
+ char s[LUAC_HEADERSIZE];
+ luaU_header(h);
+ LoadBlock(S,s,LUAC_HEADERSIZE);
+ IF (memcmp(h,s,LUAC_HEADERSIZE)!=0, "bad header");
+}
+
+/*
+** load precompiled chunk
+*/
+Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name)
+{
+ LoadState S;
+ if (*name=='@' || *name=='=')
+ S.name=name+1;
+ else if (*name==LUA_SIGNATURE[0])
+ S.name="binary string";
+ else
+ S.name=name;
+ S.L=L;
+ S.Z=Z;
+ S.b=buff;
+ LoadHeader(&S);
+ return LoadFunction(&S,luaS_newliteral(L,"=?"));
+}
+
+/*
+* make header
+*/
+void luaU_header (char* h)
+{
+ int x=1;
+ memcpy(h,LUA_SIGNATURE,sizeof(LUA_SIGNATURE)-1);
+ h+=sizeof(LUA_SIGNATURE)-1;
+ *h++=(char)LUAC_VERSION;
+ *h++=(char)LUAC_FORMAT;
+ *h++=(char)*(char*)&x; /* endianness */
+ *h++=(char)sizeof(int);
+ *h++=(char)sizeof(size_t);
+ *h++=(char)sizeof(Instruction);
+ *h++=(char)sizeof(lua_Number);
+ *h++=(char)(((lua_Number)0.5)==0); /* is lua_Number integral? */
+}
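
The loader above is internal to the core, but it is reached through the ordinary chunk-loading API: when the first byte of the input matches LUA_SIGNATURE, the core calls luaU_undump instead of the parser. The following is a minimal host-side sketch, not part of the patch, showing that path for a chunk precompiled with luac (the file name "luac.out" is only an assumption):

    /* sketch: load and run a chunk precompiled by luac (file name assumed) */
    #include <stdio.h>
    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int main(void) {
      lua_State *L = luaL_newstate();   /* fresh state */
      luaL_openlibs(L);                 /* standard libraries */
      /* luaL_loadfile sees the LUA_SIGNATURE byte, so the core uses luaU_undump */
      if (luaL_loadfile(L, "luac.out") != 0 || lua_pcall(L, 0, 0, 0) != 0) {
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
        lua_close(L);
        return 1;
      }
      lua_close(L);
      return 0;
    }
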
diff --git a/deps/lua/src/lundump.h b/deps/lua/src/lundump.h
new file mode 100644
index 0000000000000000000000000000000000000000..58cca5d19083a235e835566f1867104f011aa70d
--- /dev/null
+++ b/deps/lua/src/lundump.h
@@ -0,0 +1,36 @@
+/*
+** $Id: lundump.h,v 1.40 2005/11/11 14:03:13 lhf Exp $
+** load precompiled Lua chunks
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lundump_h
+#define lundump_h
+
+#include "lobject.h"
+#include "lzio.h"
+
+/* load one chunk; from lundump.c */
+LUAI_FUNC Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name);
+
+/* make header; from lundump.c */
+LUAI_FUNC void luaU_header (char* h);
+
+/* dump one chunk; from ldump.c */
+LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip);
+
+#ifdef luac_c
+/* print one chunk; from print.c */
+LUAI_FUNC void luaU_print (const Proto* f, int full);
+#endif
+
+/* for header of binary files -- this is Lua 5.1 */
+#define LUAC_VERSION 0x51
+
+/* for header of binary files -- this is the official format */
+#define LUAC_FORMAT 0
+
+/* size of header of binary files */
+#define LUAC_HEADERSIZE 12
+
+#endif
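
As a hedged illustration of the constants declared here, the standalone helper below (illustrative only, not added by the patch) checks the fixed 12-byte header that luaU_header writes before a file is handed to the loader: the "\033Lua" signature, then the version byte 0x51 and format 0, followed by the endianness flag and the sizes of int, size_t, Instruction and lua_Number.

    /* sketch: verify the 12-byte header in front of a compiled chunk */
    #include <stdio.h>
    #include <string.h>

    static int check_luac_header(const char *path) {
      unsigned char h[12];                       /* LUAC_HEADERSIZE */
      FILE *f = fopen(path, "rb");
      if (f == NULL) return 0;
      if (fread(h, 1, sizeof(h), f) != sizeof(h)) { fclose(f); return 0; }
      fclose(f);
      /* "\033Lua" signature, version 0x51, official format 0; the remaining
         bytes describe endianness and the sizes used by the writing platform */
      return memcmp(h, "\033Lua", 4) == 0 && h[4] == 0x51 && h[5] == 0;
    }
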
diff --git a/deps/lua/src/lvm.c b/deps/lua/src/lvm.c
new file mode 100644
index 0000000000000000000000000000000000000000..6f4c0291c970a0c49085797ef2e87565fada2cd3
--- /dev/null
+++ b/deps/lua/src/lvm.c
@@ -0,0 +1,762 @@
+/*
+** $Id: lvm.c,v 2.62 2006/01/23 19:51:43 roberto Exp $
+** Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define lvm_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "ldebug.h"
+#include "ldo.h"
+#include "lfunc.h"
+#include "lgc.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lstate.h"
+#include "lstring.h"
+#include "ltable.h"
+#include "ltm.h"
+#include "lvm.h"
+
+
+
+/* limit for table tag-method chains (to avoid loops) */
+#define MAXTAGLOOP 100
+
+
+const TValue *luaV_tonumber (const TValue *obj, TValue *n) {
+ lua_Number num;
+ if (ttisnumber(obj)) return obj;
+ if (ttisstring(obj) && luaO_str2d(svalue(obj), &num)) {
+ setnvalue(n, num);
+ return n;
+ }
+ else
+ return NULL;
+}
+
+
+int luaV_tostring (lua_State *L, StkId obj) {
+ if (!ttisnumber(obj))
+ return 0;
+ else {
+ char s[LUAI_MAXNUMBER2STR];
+ lua_Number n = nvalue(obj);
+ lua_number2str(s, n);
+ setsvalue2s(L, obj, luaS_new(L, s));
+ return 1;
+ }
+}
+
+
+static void traceexec (lua_State *L, const Instruction *pc) {
+ lu_byte mask = L->hookmask;
+ const Instruction *oldpc = L->savedpc;
+ L->savedpc = pc;
+ if (mask > LUA_MASKLINE) { /* instruction-hook set? */
+ if (L->hookcount == 0) {
+ resethookcount(L);
+ luaD_callhook(L, LUA_HOOKCOUNT, -1);
+ }
+ }
+ if (mask & LUA_MASKLINE) {
+ Proto *p = ci_func(L->ci)->l.p;
+ int npc = pcRel(pc, p);
+ int newline = getline(p, npc);
+ /* call linehook when enter a new function, when jump back (loop),
+ or when enter a new line */
+ if (npc == 0 || pc <= oldpc || newline != getline(p, pcRel(oldpc, p)))
+ luaD_callhook(L, LUA_HOOKLINE, newline);
+ }
+}
+
+
+static void callTMres (lua_State *L, StkId res, const TValue *f,
+ const TValue *p1, const TValue *p2) {
+ ptrdiff_t result = savestack(L, res);
+ setobj2s(L, L->top, f); /* push function */
+ setobj2s(L, L->top+1, p1); /* 1st argument */
+ setobj2s(L, L->top+2, p2); /* 2nd argument */
+ luaD_checkstack(L, 3);
+ L->top += 3;
+ luaD_call(L, L->top - 3, 1);
+ res = restorestack(L, result);
+ L->top--;
+ setobjs2s(L, res, L->top);
+}
+
+
+
+static void callTM (lua_State *L, const TValue *f, const TValue *p1,
+ const TValue *p2, const TValue *p3) {
+ setobj2s(L, L->top, f); /* push function */
+ setobj2s(L, L->top+1, p1); /* 1st argument */
+ setobj2s(L, L->top+2, p2); /* 2nd argument */
+ setobj2s(L, L->top+3, p3); /* 3th argument */
+ luaD_checkstack(L, 4);
+ L->top += 4;
+ luaD_call(L, L->top - 4, 0);
+}
+
+
+void luaV_gettable (lua_State *L, const TValue *t, TValue *key, StkId val) {
+ int loop;
+ for (loop = 0; loop < MAXTAGLOOP; loop++) {
+ const TValue *tm;
+ if (ttistable(t)) { /* `t' is a table? */
+ Table *h = hvalue(t);
+ const TValue *res = luaH_get(h, key); /* do a primitive get */
+ if (!ttisnil(res) || /* result is no nil? */
+ (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL) { /* or no TM? */
+ setobj2s(L, val, res);
+ return;
+ }
+ /* else will try the tag method */
+ }
+ else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX)))
+ luaG_typeerror(L, t, "index");
+ if (ttisfunction(tm)) {
+ callTMres(L, val, tm, t, key);
+ return;
+ }
+ t = tm; /* else repeat with `tm' */
+ }
+ luaG_runerror(L, "loop in gettable");
+}
+
+
+void luaV_settable (lua_State *L, const TValue *t, TValue *key, StkId val) {
+ int loop;
+ for (loop = 0; loop < MAXTAGLOOP; loop++) {
+ const TValue *tm;
+ if (ttistable(t)) { /* `t' is a table? */
+ Table *h = hvalue(t);
+ TValue *oldval = luaH_set(L, h, key); /* do a primitive set */
+ if (!ttisnil(oldval) || /* result is no nil? */
+ (tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL) { /* or no TM? */
+ setobj2t(L, oldval, val);
+ luaC_barriert(L, h, val);
+ return;
+ }
+ /* else will try the tag method */
+ }
+ else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX)))
+ luaG_typeerror(L, t, "index");
+ if (ttisfunction(tm)) {
+ callTM(L, tm, t, key, val);
+ return;
+ }
+ t = tm; /* else repeat with `tm' */
+ }
+ luaG_runerror(L, "loop in settable");
+}
+
+
+static int call_binTM (lua_State *L, const TValue *p1, const TValue *p2,
+ StkId res, TMS event) {
+ const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */
+ if (ttisnil(tm))
+ tm = luaT_gettmbyobj(L, p2, event); /* try second operand */
+ if (!ttisfunction(tm)) return 0;
+ callTMres(L, res, tm, p1, p2);
+ return 1;
+}
+
+
+static const TValue *get_compTM (lua_State *L, Table *mt1, Table *mt2,
+ TMS event) {
+ const TValue *tm1 = fasttm(L, mt1, event);
+ const TValue *tm2;
+ if (tm1 == NULL) return NULL; /* no metamethod */
+ if (mt1 == mt2) return tm1; /* same metatables => same metamethods */
+ tm2 = fasttm(L, mt2, event);
+ if (tm2 == NULL) return NULL; /* no metamethod */
+ if (luaO_rawequalObj(tm1, tm2)) /* same metamethods? */
+ return tm1;
+ return NULL;
+}
+
+
+static int call_orderTM (lua_State *L, const TValue *p1, const TValue *p2,
+ TMS event) {
+ const TValue *tm1 = luaT_gettmbyobj(L, p1, event);
+ const TValue *tm2;
+ if (ttisnil(tm1)) return -1; /* no metamethod? */
+ tm2 = luaT_gettmbyobj(L, p2, event);
+ if (!luaO_rawequalObj(tm1, tm2)) /* different metamethods? */
+ return -1;
+ callTMres(L, L->top, tm1, p1, p2);
+ return !l_isfalse(L->top);
+}
+
+
+static int l_strcmp (const TString *ls, const TString *rs) {
+ const char *l = getstr(ls);
+ size_t ll = ls->tsv.len;
+ const char *r = getstr(rs);
+ size_t lr = rs->tsv.len;
+ for (;;) {
+ int temp = strcoll(l, r);
+ if (temp != 0) return temp;
+ else { /* strings are equal up to a `\0' */
+ size_t len = strlen(l); /* index of first `\0' in both strings */
+ if (len == lr) /* r is finished? */
+ return (len == ll) ? 0 : 1;
+ else if (len == ll) /* l is finished? */
+ return -1; /* l is smaller than r (because r is not finished) */
+ /* both strings longer than `len'; go on comparing (after the `\0') */
+ len++;
+ l += len; ll -= len; r += len; lr -= len;
+ }
+ }
+}
+
+
+int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) {
+ int res;
+ if (ttype(l) != ttype(r))
+ return luaG_ordererror(L, l, r);
+ else if (ttisnumber(l))
+ return luai_numlt(nvalue(l), nvalue(r));
+ else if (ttisstring(l))
+ return l_strcmp(rawtsvalue(l), rawtsvalue(r)) < 0;
+ else if ((res = call_orderTM(L, l, r, TM_LT)) != -1)
+ return res;
+ return luaG_ordererror(L, l, r);
+}
+
+
+static int lessequal (lua_State *L, const TValue *l, const TValue *r) {
+ int res;
+ if (ttype(l) != ttype(r))
+ return luaG_ordererror(L, l, r);
+ else if (ttisnumber(l))
+ return luai_numle(nvalue(l), nvalue(r));
+ else if (ttisstring(l))
+ return l_strcmp(rawtsvalue(l), rawtsvalue(r)) <= 0;
+ else if ((res = call_orderTM(L, l, r, TM_LE)) != -1) /* first try `le' */
+ return res;
+ else if ((res = call_orderTM(L, r, l, TM_LT)) != -1) /* else try `lt' */
+ return !res;
+ return luaG_ordererror(L, l, r);
+}
+
+
+int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2) {
+ const TValue *tm;
+ lua_assert(ttype(t1) == ttype(t2));
+ switch (ttype(t1)) {
+ case LUA_TNIL: return 1;
+ case LUA_TNUMBER: return luai_numeq(nvalue(t1), nvalue(t2));
+ case LUA_TBOOLEAN: return bvalue(t1) == bvalue(t2); /* true must be 1 !! */
+ case LUA_TLIGHTUSERDATA: return pvalue(t1) == pvalue(t2);
+ case LUA_TUSERDATA: {
+ if (uvalue(t1) == uvalue(t2)) return 1;
+ tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable,
+ TM_EQ);
+ break; /* will try TM */
+ }
+ case LUA_TTABLE: {
+ if (hvalue(t1) == hvalue(t2)) return 1;
+ tm = get_compTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ);
+ break; /* will try TM */
+ }
+ default: return gcvalue(t1) == gcvalue(t2);
+ }
+ if (tm == NULL) return 0; /* no TM? */
+ callTMres(L, L->top, tm, t1, t2); /* call TM */
+ return !l_isfalse(L->top);
+}
+
+
+void luaV_concat (lua_State *L, int total, int last) {
+ do {
+ StkId top = L->base + last + 1;
+ int n = 2; /* number of elements handled in this pass (at least 2) */
+ if (!tostring(L, top-2) || !tostring(L, top-1)) {
+ if (!call_binTM(L, top-2, top-1, top-2, TM_CONCAT))
+ luaG_concaterror(L, top-2, top-1);
+ } else if (tsvalue(top-1)->len > 0) { /* if len=0, do nothing */
+ /* at least two string values; get as many as possible */
+ size_t tl = tsvalue(top-1)->len;
+ char *buffer;
+ int i;
+ /* collect total length */
+ for (n = 1; n < total && tostring(L, top-n-1); n++) {
+ size_t l = tsvalue(top-n-1)->len;
+ if (l >= MAX_SIZET - tl) luaG_runerror(L, "string length overflow");
+ tl += l;
+ }
+ buffer = luaZ_openspace(L, &G(L)->buff, tl);
+ tl = 0;
+ for (i=n; i>0; i--) { /* concat all strings */
+ size_t l = tsvalue(top-i)->len;
+ memcpy(buffer+tl, svalue(top-i), l);
+ tl += l;
+ }
+ setsvalue2s(L, top-n, luaS_newlstr(L, buffer, tl));
+ }
+ total -= n-1; /* got `n' strings to create 1 new */
+ last -= n-1;
+ } while (total > 1); /* repeat until only 1 result left */
+}
+
+
+static void Arith (lua_State *L, StkId ra, const TValue *rb,
+ const TValue *rc, TMS op) {
+ TValue tempb, tempc;
+ const TValue *b, *c;
+ if ((b = luaV_tonumber(rb, &tempb)) != NULL &&
+ (c = luaV_tonumber(rc, &tempc)) != NULL) {
+ lua_Number nb = nvalue(b), nc = nvalue(c);
+ switch (op) {
+ case TM_ADD: setnvalue(ra, luai_numadd(nb, nc)); break;
+ case TM_SUB: setnvalue(ra, luai_numsub(nb, nc)); break;
+ case TM_MUL: setnvalue(ra, luai_nummul(nb, nc)); break;
+ case TM_DIV: setnvalue(ra, luai_numdiv(nb, nc)); break;
+ case TM_MOD: setnvalue(ra, luai_nummod(nb, nc)); break;
+ case TM_POW: setnvalue(ra, luai_numpow(nb, nc)); break;
+ case TM_UNM: setnvalue(ra, luai_numunm(nb)); break;
+ default: lua_assert(0); break;
+ }
+ }
+ else if (!call_binTM(L, rb, rc, ra, op))
+ luaG_aritherror(L, rb, rc);
+}
+
+
+
+/*
+** some macros for common tasks in `luaV_execute'
+*/
+
+#define runtime_check(L, c) { if (!(c)) break; }
+
+#define RA(i) (base+GETARG_A(i))
+/* to be used after possible stack reallocation */
+#define RB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgR, base+GETARG_B(i))
+#define RC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgR, base+GETARG_C(i))
+#define RKB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, \
+ ISK(GETARG_B(i)) ? k+INDEXK(GETARG_B(i)) : base+GETARG_B(i))
+#define RKC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgK, \
+ ISK(GETARG_C(i)) ? k+INDEXK(GETARG_C(i)) : base+GETARG_C(i))
+#define KBx(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, k+GETARG_Bx(i))
+
+
+#define dojump(L,pc,i) {(pc) += (i); luai_threadyield(L);}
+
+
+#define Protect(x) { L->savedpc = pc; {x;}; base = L->base; }
+
+
+#define arith_op(op,tm) { \
+ TValue *rb = RKB(i); \
+ TValue *rc = RKC(i); \
+ if (ttisnumber(rb) && ttisnumber(rc)) { \
+ lua_Number nb = nvalue(rb), nc = nvalue(rc); \
+ setnvalue(ra, op(nb, nc)); \
+ } \
+ else \
+ Protect(Arith(L, ra, rb, rc, tm)); \
+ }
+
+
+
+void luaV_execute (lua_State *L, int nexeccalls) {
+ LClosure *cl;
+ StkId base;
+ TValue *k;
+ const Instruction *pc;
+ reentry: /* entry point */
+ pc = L->savedpc;
+ cl = &clvalue(L->ci->func)->l;
+ base = L->base;
+ k = cl->p->k;
+ /* main loop of interpreter */
+ for (;;) {
+ const Instruction i = *pc++;
+ StkId ra;
+ if ((L->hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)) &&
+ (--L->hookcount == 0 || L->hookmask & LUA_MASKLINE)) {
+ traceexec(L, pc);
+ if (L->status == LUA_YIELD) { /* did hook yield? */
+ L->savedpc = pc - 1;
+ return;
+ }
+ base = L->base;
+ }
+ /* warning!! several calls may realloc the stack and invalidate `ra' */
+ ra = RA(i);
+ lua_assert(base == L->base && L->base == L->ci->base);
+ lua_assert(base <= L->top && L->top <= L->stack + L->stacksize);
+ lua_assert(L->top == L->ci->top || luaG_checkopenop(i));
+ switch (GET_OPCODE(i)) {
+ case OP_MOVE: {
+ setobjs2s(L, ra, RB(i));
+ continue;
+ }
+ case OP_LOADK: {
+ setobj2s(L, ra, KBx(i));
+ continue;
+ }
+ case OP_LOADBOOL: {
+ setbvalue(ra, GETARG_B(i));
+ if (GETARG_C(i)) pc++; /* skip next instruction (if C) */
+ continue;
+ }
+ case OP_LOADNIL: {
+ TValue *rb = RB(i);
+ do {
+ setnilvalue(rb--);
+ } while (rb >= ra);
+ continue;
+ }
+ case OP_GETUPVAL: {
+ int b = GETARG_B(i);
+ setobj2s(L, ra, cl->upvals[b]->v);
+ continue;
+ }
+ case OP_GETGLOBAL: {
+ TValue g;
+ TValue *rb = KBx(i);
+ sethvalue(L, &g, cl->env);
+ lua_assert(ttisstring(rb));
+ Protect(luaV_gettable(L, &g, rb, ra));
+ continue;
+ }
+ case OP_GETTABLE: {
+ Protect(luaV_gettable(L, RB(i), RKC(i), ra));
+ continue;
+ }
+ case OP_SETGLOBAL: {
+ TValue g;
+ sethvalue(L, &g, cl->env);
+ lua_assert(ttisstring(KBx(i)));
+ Protect(luaV_settable(L, &g, KBx(i), ra));
+ continue;
+ }
+ case OP_SETUPVAL: {
+ UpVal *uv = cl->upvals[GETARG_B(i)];
+ setobj(L, uv->v, ra);
+ luaC_barrier(L, uv, ra);
+ continue;
+ }
+ case OP_SETTABLE: {
+ Protect(luaV_settable(L, ra, RKB(i), RKC(i)));
+ continue;
+ }
+ case OP_NEWTABLE: {
+ int b = GETARG_B(i);
+ int c = GETARG_C(i);
+ sethvalue(L, ra, luaH_new(L, luaO_fb2int(b), luaO_fb2int(c)));
+ Protect(luaC_checkGC(L));
+ continue;
+ }
+ case OP_SELF: {
+ StkId rb = RB(i);
+ setobjs2s(L, ra+1, rb);
+ Protect(luaV_gettable(L, rb, RKC(i), ra));
+ continue;
+ }
+ case OP_ADD: {
+ arith_op(luai_numadd, TM_ADD);
+ continue;
+ }
+ case OP_SUB: {
+ arith_op(luai_numsub, TM_SUB);
+ continue;
+ }
+ case OP_MUL: {
+ arith_op(luai_nummul, TM_MUL);
+ continue;
+ }
+ case OP_DIV: {
+ arith_op(luai_numdiv, TM_DIV);
+ continue;
+ }
+ case OP_MOD: {
+ arith_op(luai_nummod, TM_MOD);
+ continue;
+ }
+ case OP_POW: {
+ arith_op(luai_numpow, TM_POW);
+ continue;
+ }
+ case OP_UNM: {
+ TValue *rb = RB(i);
+ if (ttisnumber(rb)) {
+ lua_Number nb = nvalue(rb);
+ setnvalue(ra, luai_numunm(nb));
+ }
+ else {
+ Protect(Arith(L, ra, rb, rb, TM_UNM));
+ }
+ continue;
+ }
+ case OP_NOT: {
+ int res = l_isfalse(RB(i)); /* next assignment may change this value */
+ setbvalue(ra, res);
+ continue;
+ }
+ case OP_LEN: {
+ const TValue *rb = RB(i);
+ switch (ttype(rb)) {
+ case LUA_TTABLE: {
+ setnvalue(ra, cast_num(luaH_getn(hvalue(rb))));
+ break;
+ }
+ case LUA_TSTRING: {
+ setnvalue(ra, cast_num(tsvalue(rb)->len));
+ break;
+ }
+ default: { /* try metamethod */
+ Protect(
+ if (!call_binTM(L, rb, luaO_nilobject, ra, TM_LEN))
+ luaG_typeerror(L, rb, "get length of");
+ )
+ }
+ }
+ continue;
+ }
+ case OP_CONCAT: {
+ int b = GETARG_B(i);
+ int c = GETARG_C(i);
+ Protect(luaV_concat(L, c-b+1, c); luaC_checkGC(L));
+ setobjs2s(L, RA(i), base+b);
+ continue;
+ }
+ case OP_JMP: {
+ dojump(L, pc, GETARG_sBx(i));
+ continue;
+ }
+ case OP_EQ: {
+ TValue *rb = RKB(i);
+ TValue *rc = RKC(i);
+ Protect(
+ if (equalobj(L, rb, rc) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_LT: {
+ Protect(
+ if (luaV_lessthan(L, RKB(i), RKC(i)) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_LE: {
+ Protect(
+ if (lessequal(L, RKB(i), RKC(i)) == GETARG_A(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ )
+ pc++;
+ continue;
+ }
+ case OP_TEST: {
+ if (l_isfalse(ra) != GETARG_C(i))
+ dojump(L, pc, GETARG_sBx(*pc));
+ pc++;
+ continue;
+ }
+ case OP_TESTSET: {
+ TValue *rb = RB(i);
+ if (l_isfalse(rb) != GETARG_C(i)) {
+ setobjs2s(L, ra, rb);
+ dojump(L, pc, GETARG_sBx(*pc));
+ }
+ pc++;
+ continue;
+ }
+ case OP_CALL: {
+ int b = GETARG_B(i);
+ int nresults = GETARG_C(i) - 1;
+ if (b != 0) L->top = ra+b; /* else previous instruction set top */
+ L->savedpc = pc;
+ switch (luaD_precall(L, ra, nresults)) {
+ case PCRLUA: {
+ nexeccalls++;
+ goto reentry; /* restart luaV_execute over new Lua function */
+ }
+ case PCRC: {
+ /* it was a C function (`precall' called it); adjust results */
+ if (nresults >= 0) L->top = L->ci->top;
+ base = L->base;
+ continue;
+ }
+ default: {
+ return; /* yield */
+ }
+ }
+ }
+ case OP_TAILCALL: {
+ int b = GETARG_B(i);
+ if (b != 0) L->top = ra+b; /* else previous instruction set top */
+ L->savedpc = pc;
+ lua_assert(GETARG_C(i) - 1 == LUA_MULTRET);
+ switch (luaD_precall(L, ra, LUA_MULTRET)) {
+ case PCRLUA: {
+ /* tail call: put new frame in place of previous one */
+ CallInfo *ci = L->ci - 1; /* previous frame */
+ int aux;
+ StkId func = ci->func;
+ StkId pfunc = (ci+1)->func; /* previous function index */
+ if (L->openupval) luaF_close(L, ci->base);
+ L->base = ci->base = ci->func + ((ci+1)->base - pfunc);
+ for (aux = 0; pfunc+aux < L->top; aux++) /* move frame down */
+ setobjs2s(L, func+aux, pfunc+aux);
+ ci->top = L->top = func+aux; /* correct top */
+ lua_assert(L->top == L->base + clvalue(func)->l.p->maxstacksize);
+ ci->savedpc = L->savedpc;
+ ci->tailcalls++; /* one more call lost */
+ L->ci--; /* remove new frame */
+ goto reentry;
+ }
+ case PCRC: { /* it was a C function (`precall' called it) */
+ base = L->base;
+ continue;
+ }
+ default: {
+ return; /* yield */
+ }
+ }
+ }
+ case OP_RETURN: {
+ int b = GETARG_B(i);
+ if (b != 0) L->top = ra+b-1;
+ if (L->openupval) luaF_close(L, base);
+ L->savedpc = pc;
+ b = luaD_poscall(L, ra);
+ if (--nexeccalls == 0) /* was previous function running `here'? */
+ return; /* no: return */
+ else { /* yes: continue its execution */
+ if (b) L->top = L->ci->top;
+ lua_assert(isLua(L->ci));
+ lua_assert(GET_OPCODE(*((L->ci)->savedpc - 1)) == OP_CALL);
+ goto reentry;
+ }
+ }
+ case OP_FORLOOP: {
+ lua_Number step = nvalue(ra+2);
+ lua_Number idx = luai_numadd(nvalue(ra), step); /* increment index */
+ lua_Number limit = nvalue(ra+1);
+ if (luai_numlt(0, step) ? luai_numle(idx, limit)
+ : luai_numle(limit, idx)) {
+ dojump(L, pc, GETARG_sBx(i)); /* jump back */
+ setnvalue(ra, idx); /* update internal index... */
+ setnvalue(ra+3, idx); /* ...and external index */
+ }
+ continue;
+ }
+ case OP_FORPREP: {
+ const TValue *init = ra;
+ const TValue *plimit = ra+1;
+ const TValue *pstep = ra+2;
+ L->savedpc = pc; /* next steps may throw errors */
+ if (!tonumber(init, ra))
+ luaG_runerror(L, LUA_QL("for") " initial value must be a number");
+ else if (!tonumber(plimit, ra+1))
+ luaG_runerror(L, LUA_QL("for") " limit must be a number");
+ else if (!tonumber(pstep, ra+2))
+ luaG_runerror(L, LUA_QL("for") " step must be a number");
+ setnvalue(ra, luai_numsub(nvalue(ra), nvalue(pstep)));
+ dojump(L, pc, GETARG_sBx(i));
+ continue;
+ }
+ case OP_TFORLOOP: {
+ StkId cb = ra + 3; /* call base */
+ setobjs2s(L, cb+2, ra+2);
+ setobjs2s(L, cb+1, ra+1);
+ setobjs2s(L, cb, ra);
+ L->top = cb+3; /* func. + 2 args (state and index) */
+ Protect(luaD_call(L, cb, GETARG_C(i)));
+ L->top = L->ci->top;
+ cb = RA(i) + 3; /* previous call may change the stack */
+ if (!ttisnil(cb)) { /* continue loop? */
+ setobjs2s(L, cb-1, cb); /* save control variable */
+ dojump(L, pc, GETARG_sBx(*pc)); /* jump back */
+ }
+ pc++;
+ continue;
+ }
+ case OP_SETLIST: {
+ int n = GETARG_B(i);
+ int c = GETARG_C(i);
+ int last;
+ Table *h;
+ if (n == 0) {
+ n = cast_int(L->top - ra) - 1;
+ L->top = L->ci->top;
+ }
+ if (c == 0) c = cast_int(*pc++);
+ runtime_check(L, ttistable(ra));
+ h = hvalue(ra);
+ last = ((c-1)*LFIELDS_PER_FLUSH) + n;
+ if (last > h->sizearray) /* needs more space? */
+ luaH_resizearray(L, h, last); /* pre-alloc it at once */
+ for (; n > 0; n--) {
+ TValue *val = ra+n;
+ setobj2t(L, luaH_setnum(L, h, last--), val);
+ luaC_barriert(L, h, val);
+ }
+ continue;
+ }
+ case OP_CLOSE: {
+ luaF_close(L, ra);
+ continue;
+ }
+ case OP_CLOSURE: {
+ Proto *p;
+ Closure *ncl;
+ int nup, j;
+ p = cl->p->p[GETARG_Bx(i)];
+ nup = p->nups;
+ ncl = luaF_newLclosure(L, nup, cl->env);
+ ncl->l.p = p;
+ for (j=0; j<nup; j++, pc++) {
+  if (GET_OPCODE(*pc) == OP_GETUPVAL)
+   ncl->l.upvals[j] = cl->upvals[GETARG_B(*pc)];
+ else {
+ lua_assert(GET_OPCODE(*pc) == OP_MOVE);
+ ncl->l.upvals[j] = luaF_findupval(L, base + GETARG_B(*pc));
+ }
+ }
+ setclvalue(L, ra, ncl);
+ Protect(luaC_checkGC(L));
+ continue;
+ }
+ case OP_VARARG: {
+ int b = GETARG_B(i) - 1;
+ int j;
+ CallInfo *ci = L->ci;
+ int n = cast_int(ci->base - ci->func) - cl->p->numparams - 1;
+ if (b == LUA_MULTRET) {
+ Protect(luaD_checkstack(L, n));
+ ra = RA(i); /* previous call may change the stack */
+ b = n;
+ L->top = ra + n;
+ }
+ for (j = 0; j < b; j++) {
+ if (j < n) {
+ setobjs2s(L, ra + j, ci->base - n + j);
+ }
+ else {
+ setnilvalue(ra + j);
+ }
+ }
+ continue;
+ }
+ }
+ }
+}
+
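luaV_tonumber and luaV_tostring at the top of this file implement the implicit string/number coercion that the arithmetic and concatenation opcodes fall back on. A small sketch of how that behavior surfaces through the public C API (assuming a freshly created lua_State; illustrative only):

    /* sketch: string<->number coercion as seen from the C API */
    #include <stdio.h>
    #include "lua.h"
    #include "lauxlib.h"

    static void show_coercion(lua_State *L) {
      lua_pushstring(L, "10");              /* a string that looks like a number */
      printf("%d\n", lua_isnumber(L, -1));  /* 1: convertible (luaV_tonumber) */
      printf("%g\n", lua_tonumber(L, -1));  /* 10 */
      lua_pop(L, 1);

      luaL_dostring(L, "return '3' + 4");   /* OP_ADD falls back to coercion */
      printf("%g\n", lua_tonumber(L, -1));  /* 7 */
      lua_pop(L, 1);
    }
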
diff --git a/deps/lua/src/lvm.h b/deps/lua/src/lvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..788423f8e31f8e9bcf30415584faaec768ea767a
--- /dev/null
+++ b/deps/lua/src/lvm.h
@@ -0,0 +1,36 @@
+/*
+** $Id: lvm.h,v 2.5 2005/08/22 18:54:49 roberto Exp $
+** Lua virtual machine
+** See Copyright Notice in lua.h
+*/
+
+#ifndef lvm_h
+#define lvm_h
+
+
+#include "ldo.h"
+#include "lobject.h"
+#include "ltm.h"
+
+
+#define tostring(L,o) ((ttype(o) == LUA_TSTRING) || (luaV_tostring(L, o)))
+
+#define tonumber(o,n) (ttype(o) == LUA_TNUMBER || \
+ (((o) = luaV_tonumber(o,n)) != NULL))
+
+#define equalobj(L,o1,o2) \
+ (ttype(o1) == ttype(o2) && luaV_equalval(L, o1, o2))
+
+
+LUAI_FUNC int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r);
+LUAI_FUNC int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2);
+LUAI_FUNC const TValue *luaV_tonumber (const TValue *obj, TValue *n);
+LUAI_FUNC int luaV_tostring (lua_State *L, StkId obj);
+LUAI_FUNC void luaV_gettable (lua_State *L, const TValue *t, TValue *key,
+ StkId val);
+LUAI_FUNC void luaV_settable (lua_State *L, const TValue *t, TValue *key,
+ StkId val);
+LUAI_FUNC void luaV_execute (lua_State *L, int nexeccalls);
+LUAI_FUNC void luaV_concat (lua_State *L, int total, int last);
+
+#endif
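
The equalobj macro above only consults luaV_equalval when both operands have the same type, and luaV_equalval in turn only calls an __eq metamethod that is shared by both operands. A hedged sketch of that rule as seen from the C API through lua_equal (it assumes a state with the base library already opened; the helper and variable names are illustrative):

    /* sketch: lua_equal uses the equalobj/luaV_equalval path, including __eq */
    #include "lua.h"
    #include "lauxlib.h"

    static int eq_demo(lua_State *L) {   /* assumes the base library is open */
      int res;
      luaL_dostring(L,
        "a, b = {}, {}\n"
        "local mt = {__eq = function() return true end}\n"
        "setmetatable(a, mt); setmetatable(b, mt)");
      lua_getglobal(L, "a");
      lua_getglobal(L, "b");
      res = lua_equal(L, -2, -1);        /* 1: distinct tables, shared __eq says equal */
      lua_pop(L, 2);
      return res;
    }
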
diff --git a/deps/lua/src/lzio.c b/deps/lua/src/lzio.c
new file mode 100644
index 0000000000000000000000000000000000000000..5121ada8466e75cfe60cf9cde8282c30499892b8
--- /dev/null
+++ b/deps/lua/src/lzio.c
@@ -0,0 +1,82 @@
+/*
+** $Id: lzio.c,v 1.31 2005/06/03 20:15:29 roberto Exp $
+** a generic input stream interface
+** See Copyright Notice in lua.h
+*/
+
+
+#include <string.h>
+
+#define lzio_c
+#define LUA_CORE
+
+#include "lua.h"
+
+#include "llimits.h"
+#include "lmem.h"
+#include "lstate.h"
+#include "lzio.h"
+
+
+int luaZ_fill (ZIO *z) {
+ size_t size;
+ lua_State *L = z->L;
+ const char *buff;
+ lua_unlock(L);
+ buff = z->reader(L, z->data, &size);
+ lua_lock(L);
+ if (buff == NULL || size == 0) return EOZ;
+ z->n = size - 1;
+ z->p = buff;
+ return char2int(*(z->p++));
+}
+
+
+int luaZ_lookahead (ZIO *z) {
+ if (z->n == 0) {
+ if (luaZ_fill(z) == EOZ)
+ return EOZ;
+ else {
+ z->n++; /* luaZ_fill removed first byte; put back it */
+ z->p--;
+ }
+ }
+ return char2int(*z->p);
+}
+
+
+void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, void *data) {
+ z->L = L;
+ z->reader = reader;
+ z->data = data;
+ z->n = 0;
+ z->p = NULL;
+}
+
+
+/* --------------------------------------------------------------- read --- */
+size_t luaZ_read (ZIO *z, void *b, size_t n) {
+ while (n) {
+ size_t m;
+ if (luaZ_lookahead(z) == EOZ)
+ return n; /* return number of missing bytes */
+ m = (n <= z->n) ? n : z->n; /* min. between n and z->n */
+ memcpy(b, z->p, m);
+ z->n -= m;
+ z->p += m;
+ b = (char *)b + m;
+ n -= m;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n) {
+ if (n > buff->buffsize) {
+ if (n < LUA_MINBUFFER) n = LUA_MINBUFFER;
+ luaZ_resizebuffer(L, buff, n);
+ }
+ return buff->buffer;
+}
+
+
diff --git a/deps/lua/src/lzio.h b/deps/lua/src/lzio.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f403b8e74f5e4562328ebfa76f9c0ef4c77266d
--- /dev/null
+++ b/deps/lua/src/lzio.h
@@ -0,0 +1,67 @@
+/*
+** $Id: lzio.h,v 1.21 2005/05/17 19:49:15 roberto Exp $
+** Buffered streams
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lzio_h
+#define lzio_h
+
+#include "lua.h"
+
+#include "lmem.h"
+
+
+#define EOZ (-1) /* end of stream */
+
+typedef struct Zio ZIO;
+
+#define char2int(c) cast(int, cast(unsigned char, (c)))
+
+#define zgetc(z) (((z)->n--)>0 ? char2int(*(z)->p++) : luaZ_fill(z))
+
+typedef struct Mbuffer {
+ char *buffer;
+ size_t n;
+ size_t buffsize;
+} Mbuffer;
+
+#define luaZ_initbuffer(L, buff) ((buff)->buffer = NULL, (buff)->buffsize = 0)
+
+#define luaZ_buffer(buff) ((buff)->buffer)
+#define luaZ_sizebuffer(buff) ((buff)->buffsize)
+#define luaZ_bufflen(buff) ((buff)->n)
+
+#define luaZ_resetbuffer(buff) ((buff)->n = 0)
+
+
+#define luaZ_resizebuffer(L, buff, size) \
+ (luaM_reallocvector(L, (buff)->buffer, (buff)->buffsize, size, char), \
+ (buff)->buffsize = size)
+
+#define luaZ_freebuffer(L, buff) luaZ_resizebuffer(L, buff, 0)
+
+
+LUAI_FUNC char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n);
+LUAI_FUNC void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader,
+ void *data);
+LUAI_FUNC size_t luaZ_read (ZIO* z, void* b, size_t n); /* read next n bytes */
+LUAI_FUNC int luaZ_lookahead (ZIO *z);
+
+
+
+/* --------- Private Part ------------------ */
+
+struct Zio {
+ size_t n; /* bytes still unread */
+ const char *p; /* current position in buffer */
+ lua_Reader reader;
+ void* data; /* additional data */
+ lua_State *L; /* Lua state (for reader) */
+};
+
+
+LUAI_FUNC int luaZ_fill (ZIO *z);
+
+#endif
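
Everything the core reads, including precompiled chunks, arrives through a lua_Reader callback that luaZ_init wraps into a ZIO. A minimal sketch of such a reader streaming from a FILE*, illustrative only (the buffer size and helper names are assumptions):

    /* sketch: a lua_Reader streaming from a FILE*; lua_load wraps it in a ZIO */
    #include <stdio.h>
    #include "lua.h"

    typedef struct { FILE *f; char buf[512]; } FileReaderState;

    static const char *file_reader(lua_State *L, void *data, size_t *size) {
      FileReaderState *s = (FileReaderState *)data;
      (void)L;
      *size = fread(s->buf, 1, sizeof(s->buf), s->f);
      return (*size > 0) ? s->buf : NULL;   /* NULL or size==0 ends the stream (EOZ) */
    }

    static int load_from_file(lua_State *L, const char *path) {
      FileReaderState s;
      int status;
      s.f = fopen(path, "r");
      if (s.f == NULL) return -1;
      status = lua_load(L, file_reader, &s, path);  /* 0 on success */
      fclose(s.f);
      return status;
    }
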
diff --git a/deps/lua/src/print.c b/deps/lua/src/print.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c3a4457c498c68bf58b3f790aaae76610cafa8c
--- /dev/null
+++ b/deps/lua/src/print.c
@@ -0,0 +1,224 @@
+/*
+** $Id: print.c,v 1.54 2006/01/11 22:49:27 lhf Exp $
+** print bytecodes
+** See Copyright Notice in lua.h
+*/
+
+#include <ctype.h>
+#include <stdio.h>
+
+#define luac_c
+#define LUA_CORE
+
+#include "ldebug.h"
+#include "lobject.h"
+#include "lopcodes.h"
+#include "lundump.h"
+
+#define PrintFunction luaU_print
+
+#define Sizeof(x) ((int)sizeof(x))
+#define VOID(p) ((const void*)(p))
+
+static void PrintString(const Proto* f, int n)
+{
+ const char* s=svalue(&f->k[n]);
+ putchar('"');
+ for (; *s; s++)
+ {
+ switch (*s)
+ {
+ case '"': printf("\\\""); break;
+ case '\a': printf("\\a"); break;
+ case '\b': printf("\\b"); break;
+ case '\f': printf("\\f"); break;
+ case '\n': printf("\\n"); break;
+ case '\r': printf("\\r"); break;
+ case '\t': printf("\\t"); break;
+ case '\v': printf("\\v"); break;
+ default: if (isprint((unsigned char)*s))
+ printf("%c",*s);
+ else
+ printf("\\%03u",(unsigned char)*s);
+ }
+ }
+ putchar('"');
+}
+
+static void PrintConstant(const Proto* f, int i)
+{
+ const TValue* o=&f->k[i];
+ switch (ttype(o))
+ {
+ case LUA_TNIL:
+ printf("nil");
+ break;
+ case LUA_TBOOLEAN:
+ printf(bvalue(o) ? "true" : "false");
+ break;
+ case LUA_TNUMBER:
+ printf(LUA_NUMBER_FMT,nvalue(o));
+ break;
+ case LUA_TSTRING:
+ PrintString(f,i);
+ break;
+ default: /* cannot happen */
+ printf("? type=%d",ttype(o));
+ break;
+ }
+}
+
+static void PrintCode(const Proto* f)
+{
+ const Instruction* code=f->code;
+ int pc,n=f->sizecode;
+ for (pc=0; pc<n; pc++)
+ {
+  Instruction i=code[pc];
+  OpCode o=GET_OPCODE(i);
+  int a=GETARG_A(i);
+  int b=GETARG_B(i);
+  int c=GETARG_C(i);
+  int bx=GETARG_Bx(i);
+  int sbx=GETARG_sBx(i);
+  int line=getline(f,pc);
+  printf("\t%d\t",pc+1);
+  if (line>0) printf("[%d]\t",line); else printf("[-]\t");
+ printf("%-9s\t",luaP_opnames[o]);
+ switch (getOpMode(o))
+ {
+ case iABC:
+ printf("%d",a);
+ if (getBMode(o)!=OpArgN) printf(" %d",ISK(b) ? (-1-INDEXK(b)) : b);
+ if (getCMode(o)!=OpArgN) printf(" %d",ISK(c) ? (-1-INDEXK(c)) : c);
+ break;
+ case iABx:
+ if (getBMode(o)==OpArgK) printf("%d %d",a,-1-bx); else printf("%d %d",a,bx);
+ break;
+ case iAsBx:
+ if (o==OP_JMP) printf("%d",sbx); else printf("%d %d",a,sbx);
+ break;
+ }
+ switch (o)
+ {
+ case OP_LOADK:
+ printf("\t; "); PrintConstant(f,bx);
+ break;
+ case OP_GETUPVAL:
+ case OP_SETUPVAL:
+ printf("\t; %s", (f->sizeupvalues>0) ? getstr(f->upvalues[b]) : "-");
+ break;
+ case OP_GETGLOBAL:
+ case OP_SETGLOBAL:
+ printf("\t; %s",svalue(&f->k[bx]));
+ break;
+ case OP_GETTABLE:
+ case OP_SELF:
+ if (ISK(c)) { printf("\t; "); PrintConstant(f,INDEXK(c)); }
+ break;
+ case OP_SETTABLE:
+ case OP_ADD:
+ case OP_SUB:
+ case OP_MUL:
+ case OP_DIV:
+ case OP_POW:
+ case OP_EQ:
+ case OP_LT:
+ case OP_LE:
+ if (ISK(b) || ISK(c))
+ {
+ printf("\t; ");
+ if (ISK(b)) PrintConstant(f,INDEXK(b)); else printf("-");
+ printf(" ");
+ if (ISK(c)) PrintConstant(f,INDEXK(c)); else printf("-");
+ }
+ break;
+ case OP_JMP:
+ case OP_FORLOOP:
+ case OP_FORPREP:
+ printf("\t; to %d",sbx+pc+2);
+ break;
+ case OP_CLOSURE:
+ printf("\t; %p",VOID(f->p[bx]));
+ break;
+ case OP_SETLIST:
+ if (c==0) printf("\t; %d",(int)code[++pc]);
+ else printf("\t; %d",c);
+ break;
+ default:
+ break;
+ }
+ printf("\n");
+ }
+}
+
+#define SS(x) (x==1)?"":"s"
+#define S(x) x,SS(x)
+
+static void PrintHeader(const Proto* f)
+{
+ const char* s=getstr(f->source);
+ if (*s=='@' || *s=='=')
+ s++;
+ else if (*s==LUA_SIGNATURE[0])
+ s="(bstring)";
+ else
+ s="(string)";
+ printf("\n%s <%s:%d,%d> (%d instruction%s, %d bytes at %p)\n",
+ (f->linedefined==0)?"main":"function",s,
+ f->linedefined,f->lastlinedefined,
+ S(f->sizecode),f->sizecode*Sizeof(Instruction),VOID(f));
+ printf("%d%s param%s, %d slot%s, %d upvalue%s, ",
+ f->numparams,f->is_vararg?"+":"",SS(f->numparams),
+ S(f->maxstacksize),S(f->nups));
+ printf("%d local%s, %d constant%s, %d function%s\n",
+ S(f->sizelocvars),S(f->sizek),S(f->sizep));
+}
+
+static void PrintConstants(const Proto* f)
+{
+ int i,n=f->sizek;
+ printf("constants (%d) for %p:\n",n,VOID(f));
+ for (i=0; i<n; i++)
+ {
+  printf("\t%d\t",i+1);
+  PrintConstant(f,i);
+  printf("\n");
+ }
+}
+
+static void PrintLocals(const Proto* f)
+{
+ int i,n=f->sizelocvars;
+ printf("locals (%d) for %p:\n",n,VOID(f));
+ for (i=0; i<n; i++)
+ {
+  printf("\t%d\t%s\t%d\t%d\n",
+   i,getstr(f->locvars[i].varname),f->locvars[i].startpc+1,f->locvars[i].endpc+1);
+ }
+}
+
+static void PrintUpvalues(const Proto* f)
+{
+ int i,n=f->sizeupvalues;
+ printf("upvalues (%d) for %p:\n",n,VOID(f));
+ if (f->upvalues==NULL) return;
+ for (i=0; i<n; i++)
+ {
+  printf("\t%d\t%s\n",i,getstr(f->upvalues[i]));
+ }
+}
+
+void PrintFunction(const Proto* f, int full)
+{
+ int i,n=f->sizep;
+ PrintHeader(f);
+ PrintCode(f);
+ if (full)
+ {
+ PrintConstants(f);
+ PrintLocals(f);
+ PrintUpvalues(f);
+ }
+ for (i=0; i<n; i++) PrintFunction(f->p[i],full);
+}
diff --git a/deps/lua/test/README b/deps/lua/test/README
new file mode 100644
index 0000000000000000000000000000000000000000..0c7f38bc25bdf6d1d336e60c12abed960dc961e8
--- /dev/null
+++ b/deps/lua/test/README
@@ -0,0 +1,26 @@
+These are simple tests for Lua. Some of them contain useful code.
+They are meant to be run to make sure Lua is built correctly and also
+to be read, to see how Lua programs look.
+
+Here is a one-line summary of each program:
+
+ bisect.lua bisection method for solving non-linear equations
+ cf.lua temperature conversion table (celsius to fahrenheit)
+ echo.lua echo command line arguments
+ env.lua environment variables as automatic global variables
+ factorial.lua factorial without recursion
+ fib.lua fibonacci function with cache
+ fibfor.lua fibonacci numbers with coroutines and generators
+ globals.lua report global variable usage
+ hello.lua the first program in every language
+ life.lua Conway's Game of Life
+ luac.lua bare-bones luac
+ printf.lua an implementation of printf
+ readonly.lua make global variables readonly
+ sieve.lua the sieve of Eratosthenes programmed with coroutines
+ sort.lua two implementations of a sort function
+ table.lua make table, grouping all data for the same item
+ trace-calls.lua trace calls
+ trace-globals.lua trace assignments to global variables
+ xd.lua hex dump
+
diff --git a/deps/lua/test/bisect.lua b/deps/lua/test/bisect.lua
new file mode 100644
index 0000000000000000000000000000000000000000..f91e69bfbaf6710cc4ec99fee38aa37631c964de
--- /dev/null
+++ b/deps/lua/test/bisect.lua
@@ -0,0 +1,27 @@
+-- bisection method for solving non-linear equations
+
+delta=1e-6 -- tolerance
+
+function bisect(f,a,b,fa,fb)
+ local c=(a+b)/2
+ io.write(n," c=",c," a=",a," b=",b,"\n")
+ if c==a or c==b or math.abs(a-b) posted to lua-l
+-- modified to use ANSI terminal escape sequences
+-- modified to use for instead of while
+
+local write=io.write
+
+ALIVE="" DEAD=""
+ALIVE="O" DEAD="-"
+
+function delay() -- NOTE: SYSTEM-DEPENDENT, adjust as necessary
+ for i=1,10000 do end
+ -- local i=os.clock()+1 while(os.clock() 0 do
+ local xm1,x,xp1,xi=self.w-1,self.w,1,self.w
+ while xi > 0 do
+ local sum = self[ym1][xm1] + self[ym1][x] + self[ym1][xp1] +
+ self[y][xm1] + self[y][xp1] +
+ self[yp1][xm1] + self[yp1][x] + self[yp1][xp1]
+ next[y][x] = ((sum==2) and self[y][x]) or ((sum==3) and 1) or 0
+ xm1,x,xp1,xi = x,xp1,xp1+1,xi-1
+ end
+ ym1,y,yp1,yi = y,yp1,yp1+1,yi-1
+ end
+end
+
+-- output the array to screen
+function _CELLS:draw()
+ local out="" -- accumulate to reduce flicker
+ for y=1,self.h do
+ for x=1,self.w do
+ out=out..(((self[y][x]>0) and ALIVE) or DEAD)
+ end
+ out=out.."\n"
+ end
+ write(out)
+end
+
+-- constructor
+function CELLS(w,h)
+ local c = ARRAY2D(w,h)
+ c.spawn = _CELLS.spawn
+ c.evolve = _CELLS.evolve
+ c.draw = _CELLS.draw
+ return c
+end
+
+--
+-- shapes suitable for use with spawn() above
+--
+HEART = { 1,0,1,1,0,1,1,1,1; w=3,h=3 }
+GLIDER = { 0,0,1,1,0,1,0,1,1; w=3,h=3 }
+EXPLODE = { 0,1,0,1,1,1,1,0,1,0,1,0; w=3,h=4 }
+FISH = { 0,1,1,1,1,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0; w=5,h=4 }
+BUTTERFLY = { 1,0,0,0,1,0,1,1,1,0,1,0,0,0,1,1,0,1,0,1,1,0,0,0,1; w=5,h=5 }
+
+-- the main routine
+function LIFE(w,h)
+ -- create two arrays
+ local thisgen = CELLS(w,h)
+ local nextgen = CELLS(w,h)
+
+ -- create some life
+ -- about 1000 generations of fun, then a glider steady-state
+ thisgen:spawn(GLIDER,5,4)
+ thisgen:spawn(EXPLODE,25,10)
+ thisgen:spawn(FISH,4,12)
+
+ -- run until break
+ local gen=1
+ write("\027[2J") -- ANSI clear screen
+ while 1 do
+ thisgen:evolve(nextgen)
+ thisgen,nextgen = nextgen,thisgen
+ write("\027[H") -- ANSI home cursor
+ thisgen:draw()
+ write("Life - generation ",gen,"\n")
+ gen=gen+1
+ if gen>2000 then break end
+ --delay() -- no delay
+ end
+end
+
+LIFE(40,20)
diff --git a/deps/lua/test/luac.lua b/deps/lua/test/luac.lua
new file mode 100644
index 0000000000000000000000000000000000000000..96a0a97ce7aa2704c9b8b409bcc14f1a80c746ca
--- /dev/null
+++ b/deps/lua/test/luac.lua
@@ -0,0 +1,7 @@
+-- bare-bones luac in Lua
+-- usage: lua luac.lua file.lua
+
+assert(arg[1]~=nil and arg[2]==nil,"usage: lua luac.lua file.lua")
+f=assert(io.open("luac.out","wb"))
+assert(f:write(string.dump(assert(loadfile(arg[1])))))
+assert(f:close())
diff --git a/deps/lua/test/printf.lua b/deps/lua/test/printf.lua
new file mode 100644
index 0000000000000000000000000000000000000000..58c63ff5184e4f4274f9e0408a2959526c365ac0
--- /dev/null
+++ b/deps/lua/test/printf.lua
@@ -0,0 +1,7 @@
+-- an implementation of printf
+
+function printf(...)
+ io.write(string.format(...))
+end
+
+printf("Hello %s from %s on %s\n",os.getenv"USER" or "there",_VERSION,os.date())
diff --git a/deps/lua/test/readonly.lua b/deps/lua/test/readonly.lua
new file mode 100644
index 0000000000000000000000000000000000000000..85c0b4e01324d0cf5f87495d44a6da26ad4cce96
--- /dev/null
+++ b/deps/lua/test/readonly.lua
@@ -0,0 +1,12 @@
+-- make global variables readonly
+
+local f=function (t,i) error("cannot redefine global variable `"..i.."'",2) end
+local g={}
+local G=getfenv()
+setmetatable(g,{__index=G,__newindex=f})
+setfenv(1,g)
+
+-- an example
+rawset(g,"x",3)
+x=2
+y=1 -- cannot redefine `y'
diff --git a/deps/lua/test/sieve.lua b/deps/lua/test/sieve.lua
new file mode 100644
index 0000000000000000000000000000000000000000..0871bb212592726d5cca2c9478e9fcaf12c8ff09
--- /dev/null
+++ b/deps/lua/test/sieve.lua
@@ -0,0 +1,29 @@
+-- the sieve of Eratosthenes programmed with coroutines
+-- typical usage: lua -e N=1000 sieve.lua | column
+
+-- generate all the numbers from 2 to n
+function gen (n)
+ return coroutine.wrap(function ()
+ for i=2,n do coroutine.yield(i) end
+ end)
+end
+
+-- filter the numbers generated by `g', removing multiples of `p'
+function filter (p, g)
+ return coroutine.wrap(function ()
+ while 1 do
+ local n = g()
+ if n == nil then return end
+ if math.mod(n, p) ~= 0 then coroutine.yield(n) end
+ end
+ end)
+end
+
+N=N or 1000 -- from command line
+x = gen(N) -- generate primes up to N
+while 1 do
+ local n = x() -- pick a number until done
+ if n == nil then break end
+ print(n) -- must be a prime number
+ x = filter(n, x) -- now remove its multiples
+end
diff --git a/deps/lua/test/sort.lua b/deps/lua/test/sort.lua
new file mode 100644
index 0000000000000000000000000000000000000000..0bcb15f837a7acd123b5426b3ecd90badbe5a6e1
--- /dev/null
+++ b/deps/lua/test/sort.lua
@@ -0,0 +1,66 @@
+-- two implementations of a sort function
+-- this is an example only. Lua has now a built-in function "sort"
+
+-- extracted from Programming Pearls, page 110
+function qsort(x,l,u,f)
+ if ly end)
+ show("after reverse selection sort",x)
+ qsort(x,1,n,function (x,y) return x>> ",string.rep(" ",level))
+ if t~=nil and t.currentline>=0 then io.write(t.short_src,":",t.currentline," ") end
+ t=debug.getinfo(2)
+ if event=="call" then
+ level=level+1
+ else
+ level=level-1 if level<0 then level=0 end
+ end
+ if t.what=="main" then
+ if event=="call" then
+ io.write("begin ",t.short_src)
+ else
+ io.write("end ",t.short_src)
+ end
+ elseif t.what=="Lua" then
+-- table.foreach(t,print)
+ io.write(event," ",t.name or "(Lua)"," <",t.linedefined,":",t.short_src,">")
+ else
+ io.write(event," ",t.name or "(C)"," [",t.what,"] ")
+ end
+ io.write("\n")
+end
+
+debug.sethook(hook,"cr")
+level=0
diff --git a/deps/lua/test/trace-globals.lua b/deps/lua/test/trace-globals.lua
new file mode 100644
index 0000000000000000000000000000000000000000..295e670caa2bc0aa95ea822b2a68c4305f6d31f0
--- /dev/null
+++ b/deps/lua/test/trace-globals.lua
@@ -0,0 +1,38 @@
+-- trace assignments to global variables
+
+do
+ -- a tostring that quotes strings. note the use of the original tostring.
+ local _tostring=tostring
+ local tostring=function(a)
+ if type(a)=="string" then
+ return string.format("%q",a)
+ else
+ return _tostring(a)
+ end
+ end
+
+ local log=function (name,old,new)
+ local t=debug.getinfo(3,"Sl")
+ local line=t.currentline
+ io.write(t.short_src)
+ if line>=0 then io.write(":",line) end
+ io.write(": ",name," is now ",tostring(new)," (was ",tostring(old),")","\n")
+ end
+
+ local g={}
+ local set=function (t,name,value)
+ log(name,g[name],value)
+ g[name]=value
+ end
+ setmetatable(getfenv(),{__index=g,__newindex=set})
+end
+
+-- an example
+
+a=1
+b=2
+a=10
+b=20
+b=nil
+b=200
+print(a,b,c)
diff --git a/deps/lua/test/xd.lua b/deps/lua/test/xd.lua
new file mode 100644
index 0000000000000000000000000000000000000000..ebc3effc06bfde46331640f4ddafcf47fea682fe
--- /dev/null
+++ b/deps/lua/test/xd.lua
@@ -0,0 +1,14 @@
+-- hex dump
+-- usage: lua xd.lua < file
+
+local offset=0
+while true do
+ local s=io.read(16)
+ if s==nil then return end
+ io.write(string.format("%08X ",offset))
+ string.gsub(s,"(.)",
+ function (c) io.write(string.format("%02X ",string.byte(c))) end)
+ io.write(string.rep(" ",3*(16-string.len(s))))
+ io.write(" ",string.gsub(s,"%c","."),"\n")
+ offset=offset+16
+end
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 764560dbebd2f716a135895b3f1a45ebbb3cd32e..d5cc8675a42e09ca656272222f460609cef76af9 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -218,7 +218,7 @@ TDengine's default timestamp precision is milliseconds, but it can be changed at CREATE DATABASE time by passing
 Note: wildcards can be used in LIKE to match names; the wildcard string may be at most 24 bytes long.
- Wildcard matching: 1) '%' (percent sign) matches zero or more characters; 2) '\_' (underscore) matches one character.
+ Wildcard matching: 1) '%' (percent sign) matches zero or more characters; 2) '\_' (underscore) matches exactly one character. (To match an underscore that is literally part of a table name, escape it with a backslash; that is, '\\\_' matches a literal underscore in the table name.)
- **Show the CREATE statement of a data table**
@@ -696,9 +696,9 @@ Query OK, 1 row(s) in set (0.001091s)
* Arithmetic expressions that contain column names are not yet supported as arguments to SQL functions (for example, `select min(2*a) from t;` is not supported, but `select 2*min(a) from t;` is).
- The WHERE clause can filter numeric values with various logical conditions, or filter strings with wildcards.
- By default the output is sorted in ascending order by the first-column timestamp, but descending order can be specified (_c0 denotes the first-column timestamp). Using ORDER BY on other fields is an illegal operation.
-- The LIMIT parameter controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET are applied to the result set after ORDER BY.
+- The LIMIT parameter controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET are applied to the result set after ORDER BY. `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`.
 * When a GROUP BY clause is present, LIMIT controls the maximum number of rows output within each group.
-- The SLIMIT parameter controls at most how many of the groups produced by GROUP BY are output.
+- The SLIMIT parameter controls at most how many of the groups produced by GROUP BY are output. `SLIMIT 5 SOFFSET 2` can be abbreviated as `SLIMIT 2, 5`.
- Query results can be exported to a specified file with ">>".
 ### Supported filtering operations
@@ -1342,7 +1342,7 @@ SELECT function_list FROM stb_name
- In aggregate queries, the function_list position accepts aggregation and selection functions that each produce a single result (e.g. COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions that produce multiple output rows (e.g. TOP, BOTTOM, DIFF, and arithmetic expressions) cannot be used.
- Query filtering, aggregation, and similar operations are executed independently for each window. Aggregate queries currently support three ways of partitioning windows:
 1. Time windows: the window width of the aggregation period is specified by the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval), i.e. the displacement of the window partitioning relative to "UTC time 0". The SLIDING clause specifies the forward increment of the aggregation period, i.e. how far the window slides forward each time. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
- 2. State windows: an integer (boolean) or string value identifies the state of the device when a record is generated; records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is specified as the parameter of the STAT_WINDOW clause.
+ 2. State windows: an integer (boolean) or string value identifies the state of the device when a record is generated; records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is specified as the parameter of the STATE_WINDOW clause.
 3. Session windows: the column containing the timestamp is specified by the ts_col parameter of the SESSION clause; whether two adjacent records belong to the same session is determined by the difference between their timestamps. If the difference is within tol_val, the records belong to the same window; if it exceeds tol_val, the next window is opened automatically.
- The WHERE clause can specify the query's start and end times as well as other filter conditions.
- The FILL clause specifies how missing data within a window interval is filled. The fill modes include:
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index d3bd7510a339c7386cdf83ce5806c2e3ad63db8e..9ad06cd1bd586577f3fb8968e3a998021d3ada9b 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -40,7 +40,7 @@
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
-# keepColumnName 0
+keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
diff --git a/packaging/release.sh b/packaging/release.sh
index 1d81f818a9b386e5520c5fa6b7499a7990cbe23a..5ba6c01a0bd5689278bdb5c86b538b3c447f086a 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -11,7 +11,7 @@ set -e
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
-# -d [taos | power]
+# -d [taos | power | tq ]
# -n [2.0.0.3]
# -m [2.0.0.0]
@@ -22,10 +22,10 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
+dbName=taos # [taos | power | tq]
allocator=glibc # [glibc | jemalloc]
-dbName=taos # [taos | power]
verNumber=""
-verNumberComp="2.0.0.0"
+verNumberComp="1.0.0.0"
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
@@ -78,7 +78,7 @@ do
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
- echo " -d [taos | power] "
+ echo " -d [taos | power | tq ] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
@@ -246,11 +246,15 @@ if [ "$osType" != "Darwin" ]; then
cd ${script_dir}/tools
if [[ "$dbName" == "taos" ]]; then
- ${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ elif [[ "$dbName" == "tq" ]]; then
+ ${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+ ${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
- ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
fi
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 325ac810539385f8a43fb655b76a8e211d65c872..57467e4b72e4dd1ae962de922d905f1c95c5e29f 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -270,6 +270,13 @@ function install_jemalloc() {
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
fi
}
@@ -765,9 +772,13 @@ vercomp () {
function is_version_compatible() {
- curr_version=$(${bin_dir}/taosd -V | head -1 | cut -d ' ' -f 3)
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
- min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+ fi
vercomp $curr_version $min_compatible_version
case $? in
@@ -784,6 +795,7 @@ function update_TDengine() {
exit 1
fi
tar -zxf taos.tar.gz
+ install_jemalloc
# Check if version compatible
if ! is_version_compatible; then
@@ -822,7 +834,6 @@ function update_TDengine() {
install_log
install_header
install_lib
- install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
@@ -896,6 +907,7 @@ function install_TDengine() {
install_log
install_header
install_lib
+ install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
index f47c3672cb0f806ee429209462c2a001be6090de..3a5e64153836096268dee2be08919cd774b68ebe 100755
--- a/packaging/tools/install_arbi.sh
+++ b/packaging/tools/install_arbi.sh
@@ -38,11 +38,11 @@ initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
-elif $(which service &> /dev/null); then
+elif $(which service &> /dev/null); then
service_mod=1
- service_config_dir="/etc/init.d"
+ service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
- initd_mod=1
+ initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
@@ -50,7 +50,7 @@ elif $(which service &> /dev/null); then
else
service_mod=2
fi
-else
+else
service_mod=2
fi
@@ -82,7 +82,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then
os_type=2
else
echo " osinfo: ${osinfo}"
- echo " This is an officially unverified linux system,"
+ echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
@@ -99,7 +99,7 @@ function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
- ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/bin
#${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
}
@@ -117,21 +117,74 @@ function install_bin() {
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
- if [ -e ${service_config_dir}/tarbitratord ]; then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
@@ -142,10 +195,10 @@ function clean_service_on_sysvinit() {
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
- fi
+ fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
-
+
if $(which init &> /dev/null); then
${csudo} init q || :
fi
@@ -164,10 +217,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
-
+
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
-
+
if ((${initd_mod}==1)); then
${csudo} chkconfig --add tarbitratord || :
${csudo} chkconfig --level 2345 tarbitratord on || :
@@ -245,12 +298,13 @@ function update_TDengine() {
fi
sleep 1
fi
-
+
install_main_path
#install_header
install_bin
install_service
-
+ install_jemalloc
+
echo
#echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
@@ -259,7 +313,7 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
- fi
+ fi
echo
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
}
@@ -267,11 +321,13 @@ function update_TDengine() {
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
-
- install_main_path
+
+ install_main_path
#install_header
install_bin
install_service
+ install_jemalloc
+
echo
#echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh
index 3f271751511a124994a0f1833b59ff406dd9b227..883db2b7169d125309125887cb72279c92c4602a 100755
--- a/packaging/tools/install_arbi_power.sh
+++ b/packaging/tools/install_arbi_power.sh
@@ -38,11 +38,11 @@ initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
-elif $(which service &> /dev/null); then
+elif $(which service &> /dev/null); then
service_mod=1
- service_config_dir="/etc/init.d"
+ service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
- initd_mod=1
+ initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
@@ -50,7 +50,7 @@ elif $(which service &> /dev/null); then
else
service_mod=2
fi
-else
+else
service_mod=2
fi
@@ -82,7 +82,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then
os_type=2
else
echo " osinfo: ${osinfo}"
- echo " This is an officially unverified linux system,"
+ echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
@@ -99,7 +99,7 @@ function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
- ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/bin
#${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
}
@@ -115,23 +115,76 @@ function install_bin() {
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
}
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} bash -c "echo '/usr/local/lib' > /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
- if [ -e ${service_config_dir}/tarbitratord ]; then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
@@ -142,10 +195,10 @@ function clean_service_on_sysvinit() {
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
- fi
+ fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
-
+
if $(which init &> /dev/null); then
${csudo} init q || :
fi
@@ -164,10 +217,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
-
+
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
-
+
if ((${initd_mod}==1)); then
${csudo} chkconfig --add tarbitratord || :
${csudo} chkconfig --level 2345 tarbitratord on || :
@@ -245,12 +298,13 @@ function update_PowerDB() {
fi
sleep 1
fi
-
+
install_main_path
#install_header
install_bin
install_service
-
+ install_jemalloc
+
echo
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
@@ -259,7 +313,7 @@ function update_PowerDB() {
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
- fi
+ fi
echo
echo -e "\033[44;32;1mPowerDB's arbitrator is updated successfully!${NC}"
}
@@ -267,11 +321,13 @@ function update_PowerDB() {
function install_PowerDB() {
# Start to install
echo -e "${GREEN}Start to install PowerDB's arbitrator ...${NC}"
-
- install_main_path
+
+ install_main_path
#install_header
install_bin
install_service
+ install_jemalloc
+
echo
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bd852dd0ad2c9114f2424193adccf56b0cb40412
--- /dev/null
+++ b/packaging/tools/install_arbi_tq.sh
@@ -0,0 +1,298 @@
+#!/bin/bash
+#
+# This file is used to install the TQ arbitrator on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
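+# Detect how services are managed on this host: service_mod 0 means systemd,
+# 1 means sysvinit (driven through the service command), and 2 means no service
+# manager, so the arbitrator has to be started manually. For sysvinit, initd_mod
+# records whether chkconfig, insserv or update-rc.d is used to register it.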
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_tq.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
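+# Stop any running tarbitrator, deregister the old sysvinit service and remove
+# its init script before a fresh copy is installed.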
+function clean_service_on_sysvinit() {
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install the tarbitratord service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+# tq:2345:respawn:/etc/init.d/tarbitratord start
+
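+# Write ${service_config_dir}/tarbitratord.service line by line and enable it so
+# systemd starts the arbitrator at boot.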
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must manually stop the running tarbitrator
+ kill_tarbitrator
+ fi
+}
+
+function update_tq() {
+ # Start to update
+ echo -e "${GREEN}Start to update TQ's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mTQ's arbitrator is updated successfully!${NC}"
+}
+
+function install_tq() {
+ # Start to install
+ echo -e "${GREEN}Start to install TQ's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mTQ's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the arbitrator
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update_tq
+else
+ install_tq
+fi
+
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 0a0a6633e376d084532abb5f490917abd1a173f2..9044f2367214510d33e35ce6569ec204f2845f81 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -119,16 +119,16 @@ function install_lib() {
if [ "$osType" != "Darwin" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-
+
if [ -d "${lib64_link_dir}" ]; then
- ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
- ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
- fi
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
else
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
+
${csudo} ldconfig
}
@@ -139,6 +139,53 @@ function install_header() {
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+ ${csudo} ldconfig
+ fi
+}
+
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
@@ -194,6 +241,7 @@ function update_TDengine() {
install_log
install_header
install_lib
+ install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
@@ -217,10 +265,11 @@ function install_TDengine() {
echo -e "${GREEN}Start to install TDengine client...${NC}"
- install_main_path
+ install_main_path
install_log
install_header
install_lib
+ install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh
index 8d7463366ff46bcae2822ee3e76dbc9b588f2a89..31da0d61319045800fe3a454d071118aa3a4768e 100755
--- a/packaging/tools/install_client_power.sh
+++ b/packaging/tools/install_client_power.sh
@@ -119,16 +119,16 @@ function install_lib() {
if [ "$osType" != "Darwin" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-
+
if [ -d "${lib64_link_dir}" ]; then
- ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
- ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
- fi
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
else
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
+
${csudo} ldconfig
}
@@ -139,6 +139,59 @@ function install_header() {
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} bash -c "echo '/usr/local/lib' > /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
@@ -181,6 +234,7 @@ function update_PowerDB() {
exit 1
fi
tar -zxf power.tar.gz
+ install_jemalloc
echo -e "${GREEN}Start to update PowerDB client...${NC}"
# Stop the client shell if running
@@ -217,10 +271,11 @@ function install_PowerDB() {
echo -e "${GREEN}Start to install PowerDB client...${NC}"
- install_main_path
+ install_main_path
install_log
install_header
install_lib
+ install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2537442ee264e9aeb4eb6b3d25a17faf60f4df9a
--- /dev/null
+++ b/packaging/tools/install_client_tq.sh
@@ -0,0 +1,251 @@
+#!/bin/bash
+#
+# This file is used to install TQ client on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/tq"
+ log_dir="/var/log/tq"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/tq"
+ log_dir="~/TQLog"
+fi
+
+log_link_dir="/usr/local/tq/log"
+
+cfg_install_dir="/etc/tq"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+# old bin dir
+bin_dir="/usr/local/tq/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+function kill_client() {
+ pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/tq ] && ${csudo} ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || :
+ if [ "$osType" != "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
+ [ -x ${install_main_dir}/bin/tqdump ] && ${csudo} ln -s ${install_main_dir}/bin/tqdump ${bin_link_dir}/tqdump || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_tq.sh ${bin_link_dir}/rmtq || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ sudo rm -f /usr/lib/libtaos.* || :
+ sudo rm -rf ${lib_dir} || :
+}
+
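+# Copy the client driver into ${install_main_dir}/driver and recreate the
+# libtaos symlinks in the system library directories, then refresh ldconfig.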
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
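+# update_tq and install_tq both unpack tq.tar.gz from the current directory,
+# install the client files, and remove the extracted files afterwards.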
+function update_tq() {
+ # Start to update
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to update TQ client...${NC}"
+ # Stop the client shell if running
+ if pidof tq &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+function install_tq() {
+ # Start to install
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to install TQ client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, don't install the client
+ if [ -e ${bin_dir}/tqd ]; then
+ echo -e "\033[44;32;1mTQ server is already installed, so there is no need to install the client!${NC}"
+ exit 0
+ fi
+
+ if [ -x ${bin_dir}/tq ]; then
+ update_flag=1
+ update_tq
+ else
+ install_tq
+ fi
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 9f28435cb5f1cc43ae3bf1433074920889ccee3b..d0220cca2597ec42cd61a5775017fdbdde55b753 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -58,11 +58,11 @@ initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
-elif $(which service &> /dev/null); then
+elif $(which service &> /dev/null); then
service_mod=1
- service_config_dir="/etc/init.d"
+ service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
- initd_mod=1
+ initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
@@ -70,7 +70,7 @@ elif $(which service &> /dev/null); then
else
service_mod=2
fi
-else
+else
service_mod=2
fi
@@ -102,7 +102,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then
os_type=2
else
echo " osinfo: ${osinfo}"
- echo " This is an officially unverified linux system,"
+ echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
@@ -137,7 +137,7 @@ do
echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
exit 0
;;
- ?) #unknow option
+ ?) #unknow option
echo "unkonw argument"
exit 1
;;
@@ -156,9 +156,9 @@ function kill_process() {
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
- ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg
- ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
@@ -200,29 +200,82 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
- ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
-
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-
+
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
-
- #if [ "$verMode" == "cluster" ]; then
+
+ #if [ "$verMode" == "cluster" ]; then
# # Compatible with version 1.5
# ${csudo} mkdir -p ${v15_java_app_dir}
# ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
# ${csudo} chmod 777 ${v15_java_app_dir} || :
#fi
-
+
${csudo} ldconfig
}
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} bash -c "echo '/usr/local/lib' > /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -239,13 +292,13 @@ function add_newHostname_to_hosts() {
if [[ "$s" == "$localIp" ]]; then
return
fi
- done
+ done
${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
}
function set_hostname() {
echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
- read newHostname
+ read newHostname
while true; do
if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
break
@@ -259,25 +312,25 @@ function set_hostname() {
if [[ $retval != 0 ]]; then
echo
echo "set hostname fail!"
- return
+ return
fi
#echo -e -n "$(hostnamectl status --static)"
#echo -e -n "$(hostnamectl status --transient)"
#echo -e -n "$(hostnamectl status --pretty)"
-
+
#ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then
${csudo} echo $newHostname > /etc/hostname ||:
fi
-
+
#debian: #HOSTNAME=yourname
if [[ -e /etc/sysconfig/network ]]; then
${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
fi
${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
- serverFqdn=$newHostname
-
+ serverFqdn=$newHostname
+
if [[ -e /etc/hosts ]]; then
add_newHostname_to_hosts $newHostname
fi
@@ -295,7 +348,7 @@ function is_correct_ipaddr() {
return 0
fi
done
-
+
return 1
}
@@ -309,13 +362,13 @@ function set_ipAsFqdn() {
echo
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1"
- # Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
serverFqdn=$localFqdn
echo
return
- fi
-
+ fi
+
echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
echo
echo -e -n "${GREEN}$iplist${NC}"
@@ -324,15 +377,15 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
read localFqdn
while true; do
- if [ ! -z "$localFqdn" ]; then
+ if [ ! -z "$localFqdn" ]; then
# Check if correct ip address
is_correct_ipaddr $localFqdn
retval=`echo $?`
if [[ $retval != 0 ]]; then
read -p "Please choose an IP from local IP list:" localFqdn
else
- # Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
serverFqdn=$localFqdn
break
fi
@@ -347,59 +400,59 @@ function local_fqdn_check() {
echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo
- if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
- echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
- echo
-
- while true
- do
- read -r -p "Set hostname now? [Y/n] " input
- if [ ! -n "$input" ]; then
- set_hostname
- break
- else
- case $input in
- [yY][eE][sS]|[yY])
- set_hostname
- break
- ;;
-
- [nN][oO]|[nN])
- set_ipAsFqdn
- break
- ;;
-
- *)
- echo "Invalid input..."
- ;;
- esac
- fi
- done
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+ if [ ! -n "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
fi
}
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
+
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
- fi
-
+ fi
+
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
-
+
if ((${update_flag}==1)); then
return 0
fi
-
+
if [ "$interactiveFqdn" == "no" ]; then
return 0
fi
-
+
local_fqdn_check
#FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
@@ -417,8 +470,8 @@ function install_config() {
if [ ! -z "$firstEp" ]; then
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
- # Write the first FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
break
#else
# read -p "Please enter the correct FQDN:port: " firstEp
@@ -426,21 +479,21 @@ function install_config() {
else
break
fi
- done
+ done
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
-
+
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
function install_data() {
${csudo} mkdir -p ${data_dir}
-
- ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
}
function install_connector() {
@@ -455,26 +508,26 @@ function install_examples() {
function clean_service_on_sysvinit() {
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
if pidof powerd &> /dev/null; then
${csudo} service powerd stop || :
fi
-
+
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
- if [ -e ${service_config_dir}/powerd ]; then
+ if [ -e ${service_config_dir}/powerd ]; then
${csudo} chkconfig --del powerd || :
fi
- if [ -e ${service_config_dir}/tarbitratord ]; then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
- if [ -e ${service_config_dir}/powerd ]; then
+ if [ -e ${service_config_dir}/powerd ]; then
${csudo} insserv -r powerd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
@@ -488,10 +541,10 @@ function clean_service_on_sysvinit() {
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
-
+
${csudo} rm -f ${service_config_dir}/powerd || :
${csudo} rm -f ${service_config_dir}/tarbitratord || :
-
+
if $(which init &> /dev/null); then
${csudo} init q || :
fi
@@ -514,10 +567,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
-
+
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
-
+
if ((${initd_mod}==1)); then
${csudo} chkconfig --add powerd || :
${csudo} chkconfig --level 2345 powerd on || :
@@ -542,7 +595,7 @@ function clean_service_on_systemd() {
fi
${csudo} systemctl disable powerd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${powerd_service_config}
-
+
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
@@ -550,7 +603,7 @@ function clean_service_on_systemd() {
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
-
+
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
@@ -558,8 +611,8 @@ function clean_service_on_systemd() {
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
- ${csudo} rm -f ${nginx_service_config}
- fi
+ ${csudo} rm -f ${nginx_service_config}
+ fi
}
# power:2345:respawn:/etc/init.d/powerd start
@@ -590,7 +643,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Install]' >> ${powerd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${powerd_service_config}"
${csudo} systemctl enable powerd
-
+
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
@@ -612,9 +665,9 @@ function install_service_on_systemd() {
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
- #${csudo} systemctl enable tarbitratord
-
- if [ "$verMode" == "cluster" ]; then
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}"
@@ -643,7 +696,7 @@ function install_service_on_systemd() {
${csudo} systemctl enable nginxd
fi
${csudo} systemctl start nginxd
- fi
+ fi
}
function install_service() {
@@ -688,9 +741,13 @@ vercomp () {
function is_version_compatible() {
- curr_version=$(${bin_dir}/powerd -V | head -1 | cut -d ' ' -f 3)
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
- min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
+ fi
vercomp $curr_version $min_compatible_version
case $? in
@@ -707,6 +764,7 @@ function update_PowerDB() {
exit 1
fi
tar -zxf power.tar.gz
+ install_jemalloc
# Check if version compatible
if ! is_version_compatible; then
@@ -725,8 +783,8 @@ function update_PowerDB() {
kill_process powerd
fi
sleep 1
- fi
- if [ "$verMode" == "cluster" ]; then
+ fi
+ if [ "$verMode" == "cluster" ]; then
if pidof nginx &> /dev/null; then
if ((${service_mod}==0)); then
${csudo} systemctl stop nginxd || :
@@ -738,7 +796,7 @@ function update_PowerDB() {
sleep 1
fi
fi
-
+
install_main_path
install_log
@@ -751,10 +809,10 @@ function update_PowerDB() {
if [ -z $1 ]; then
install_bin
install_service
- install_config
-
+ install_config
+
openresty_work=false
- if [ "$verMode" == "cluster" ]; then
+ if [ "$verMode" == "cluster" ]; then
# Check if openresty is installed
# Check if nginx is installed successfully
if type curl &> /dev/null; then
@@ -765,7 +823,7 @@ function update_PowerDB() {
echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
fi
fi
- fi
+ fi
#echo
#echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
@@ -784,7 +842,7 @@ function update_PowerDB() {
else
echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power -h $serverFqdn${NC} in shell${NC}"
fi
-
+
echo
echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
else
@@ -807,16 +865,17 @@ function install_PowerDB() {
tar -zxf power.tar.gz
echo -e "${GREEN}Start to install PowerDB...${NC}"
-
- install_main_path
-
+
+ install_main_path
+
if [ -z $1 ]; then
install_data
- fi
-
- install_log
+ fi
+
+ install_log
install_header
install_lib
+ install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
@@ -839,8 +898,8 @@ function install_PowerDB() {
fi
fi
fi
-
- install_config
+
+ install_config
# Ask if to start the service
#echo
@@ -853,35 +912,35 @@ function install_PowerDB() {
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} service powerd start${NC}"
else
echo -e "${GREEN_DARK}To start PowerDB ${NC}: powerd${NC}"
- fi
+ fi
#if [ ${openresty_work} = 'true' ]; then
# echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
#else
# echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
#fi
-
+
if [ ! -z "$firstEp" ]; then
- tmpFqdn=${firstEp%%:*}
- substr=":"
- if [[ $firstEp =~ $substr ]];then
- tmpPort=${firstEp#*:}
- else
- tmpPort=""
- fi
- if [[ "$tmpPort" != "" ]];then
- echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
- else
- echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
- fi
- echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
- echo
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
elif [ ! -z "$serverFqdn" ]; then
- echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $serverFqdn${GREEN_DARK} to login into PowerDB server${NC}"
- echo
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $serverFqdn${GREEN_DARK} to login into PowerDB server${NC}"
+ echo
fi
echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
- echo
+ echo
else # Only install client
install_bin
install_config
@@ -913,6 +972,6 @@ elif [ "$verType" == "client" ]; then
else
install_PowerDB client
fi
-else
- echo "please input correct verType"
+else
+ echo "please input correct verType"
fi
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..52e08cb6b0d00b25686b87e2f066401e0388d4ce
--- /dev/null
+++ b/packaging/tools/install_tq.sh
@@ -0,0 +1,977 @@
+#!/bin/bash
+#
+# This file is used to install TQ on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/tq"
+log_dir="/var/log/tq"
+
+data_link_dir="/usr/local/tq/data"
+log_link_dir="/usr/local/tq/log"
+
+cfg_install_dir="/etc/tq"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+# old bin dir
+bin_dir="/usr/local/tq/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
+
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqd || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/tq ] && ${csudo} ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || :
+ [ -x ${install_main_dir}/bin/tqd ] && ${csudo} ln -s ${install_main_dir}/bin/tqd ${bin_link_dir}/tqd || :
+ [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
+ [ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ #if [ "$verMode" == "cluster" ]; then
+ # # Compatible with version 1.5
+ # ${csudo} mkdir -p ${v15_java_app_dir}
+ # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
+ # ${csudo} chmod 777 ${v15_java_app_dir} || :
+ #fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
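+# Install the jemalloc binaries, headers, libraries, docs and man pages bundled
+# under ${script_dir}/jemalloc into /usr/local, then refresh the linker cache.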
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} bash -c "echo '/usr/local/lib' > /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
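+
+# Sanity check (run manually if needed): after install_jemalloc the library
+# should be registered with the dynamic linker, e.g.:
+#   ldconfig -p | grep jemalloc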
+
+function add_newHostname_to_hosts() {
+ localIp="127.0.0.1"
+ OLD_IFS="$IFS"
+ IFS=" "
+ iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
+ arr=($iphost)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$localIp" ]]; then
+ return
+ fi
+ done
+ ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
+}
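+
+# add_newHostname_to_hosts appends "127.0.0.1 <hostname>" to /etc/hosts unless
+# that name is already mapped to 127.0.0.1 there, e.g.:
+#   add_newHostname_to_hosts h1.taosdata.com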
+
+function set_hostname() {
+ echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
+ read newHostname
+ while true; do
+ if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
+ break
+ else
+ read -p "Please enter one hostname(must not be 'localhost'):" newHostname
+ fi
+ done
+
+ ${csudo} hostname $newHostname ||:
+ retval=$?
+ if [[ $retval != 0 ]]; then
+ echo
+ echo "set hostname fail!"
+ return
+ fi
+ #echo -e -n "$(hostnamectl status --static)"
+ #echo -e -n "$(hostnamectl status --transient)"
+ #echo -e -n "$(hostnamectl status --pretty)"
+
+ #ubuntu/centos /etc/hostname
+ if [[ -e /etc/hostname ]]; then
+ ${csudo} bash -c "echo $newHostname > /etc/hostname" ||:
+ fi
+
+ #debian: #HOSTNAME=yourname
+ if [[ -e /etc/sysconfig/network ]]; then
+ ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
+ fi
+
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$newHostname
+
+ if [[ -e /etc/hosts ]]; then
+ add_newHostname_to_hosts $newHostname
+ fi
+}
+
+function is_correct_ipaddr() {
+ newIp=$1
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($iplist)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$newIp" ]]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
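+
+# is_correct_ipaddr returns 0 only when $1 is one of the addresses in the
+# $iplist collected by set_ipAsFqdn, e.g.:
+#   is_correct_ipaddr 192.168.1.10 && echo "ok"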
+
+function set_ipAsFqdn() {
+ iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
+ if [ -z "$iplist" ]; then
+ iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
+ fi
+
+ if [ -z "$iplist" ]; then
+ echo
+ echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
+ localFqdn="127.0.0.1"
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ echo
+ return
+ fi
+
+ echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
+ echo
+ echo -e -n "${GREEN}$iplist${NC}"
+ echo
+ echo
+ echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
+ read localFqdn
+ while true; do
+ if [ ! -z "$localFqdn" ]; then
+ # Check if correct ip address
+ is_correct_ipaddr $localFqdn
+ retval=$?
+ if [[ $retval != 0 ]]; then
+ read -p "Please choose an IP from local IP list:" localFqdn
+ else
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ break
+ fi
+ else
+ read -p "Please choose an IP from local IP list:" localFqdn
+ fi
+ done
+}
+
+function local_fqdn_check() {
+ #serverFqdn=$(hostname)
+ echo
+ echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
+ echo
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+ if [ ! -n "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
+ fi
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ [ ! -z $1 ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ local_fqdn_check
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+ # first full-qualified domain name (FQDN) for TQ cluster system
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TQ cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
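+
+# Note: a non-empty firstEp entered above is written to the firstEp entry of
+# ${cfg_install_dir}/taos.cfg so this node can join that cluster on first
+# start; leaving it blank keeps the default so a new cluster is built here.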
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tqd &> /dev/null; then
+ ${csudo} service tqd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} chkconfig --del tqd || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} insserv -r tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} update-rc.d -f tqd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tqd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
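+
+# initd_mod is detected earlier in this script: 1 -> chkconfig, 2 -> insserv,
+# 3 -> update-rc.d; clean_service_on_sysvinit above and
+# install_service_on_sysvinit below both follow that mapping.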
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install tqd service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tqd.deb ${install_main_dir}/init.d/tqd
+ ${csudo} cp ${script_dir}/init.d/tqd.deb ${service_config_dir}/tqd && ${csudo} chmod a+x ${service_config_dir}/tqd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tqd.rpm ${install_main_dir}/init.d/tqd
+ ${csudo} cp ${script_dir}/init.d/tqd.rpm ${service_config_dir}/tqd && ${csudo} chmod a+x ${service_config_dir}/tqd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tqd || :
+ ${csudo} chkconfig --level 2345 tqd on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tqd || :
+ ${csudo} insserv -d tqd || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tqd defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tqd_service_config="${service_config_dir}/tqd.service"
+ if systemctl is-active --quiet tqd; then
+ echo "TQ is running, stopping it..."
+ ${csudo} systemctl stop tqd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tqd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tqd_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for TDengine is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+# tq:2345:respawn:/etc/init.d/tqd start
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tqd_service_config="${service_config_dir}/tqd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ server service' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo >> ${tqd_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tqd' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'ExecStartPre=/usr/local/tq/bin/startPre.sh' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo >> ${tqd_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tqd_service_config}"
+ ${csudo} systemctl enable tqd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
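+
+# The unit files written above can be inspected after installation with, e.g.:
+#   systemctl cat tqd
+#   systemctl status tqd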
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must manually stop tqd
+ kill_process tqd
+ fi
+}
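+
+# service_mod is detected earlier in this script: 0 -> systemd, 1 -> sysvinit,
+# anything else -> no service manager, in which case tqd must be started and
+# stopped manually.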
+
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
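+
+# vercomp compares two dotted version strings: returns 0 when equal, 1 when
+# $1 > $2 and 2 when $1 < $2, e.g.:
+#   vercomp 2.0.20.0 2.0.16.0   # -> 1
+#   vercomp 2.0.16.0 2.0.20.0   # -> 2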
+
+function is_version_compatible() {
+
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+ fi
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
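+
+# is_version_compatible returns 0 when the packaged driver version is greater
+# than or equal to the minimum compatible version read from driver/vercomp.txt
+# (or, if that file is missing, taken from "tqd -V").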
+
+function update_tq() {
+ # Start to update
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+ install_jemalloc
+
+ # Check if version compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update TQ...${NC}"
+ # Stop the service if running
+ if pidof tqd &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tqd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tqd stop || :
+ else
+ kill_process tqd
+ fi
+ sleep 1
+ fi
+ if [ "$verMode" == "cluster" ]; then
+ if pidof nginx &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop nginxd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service nginxd stop || :
+ else
+ kill_process nginx
+ fi
+ sleep 1
+ fi
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for TQ is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} service tqd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ./tqd${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+function install_tq() {
+ # Start to install
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to install TQ...${NC}"
+
+ install_main_path
+
+ if [ -z $1 ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+
+ if [ -z $1 ]; then # install service and client
+ # For installing new
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for TQ is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+ # Ask whether to start the service
+ #echo
+ #echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} service tqd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start TQ ${NC}: tqd${NC}"
+ fi
+
+ #if [ ${openresty_work} = 'true' ]; then
+ # echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ #else
+ # echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell${NC}"
+ #fi
+
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! -z "$serverFqdn" ]; then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $serverFqdn${GREEN_DARK} to login into TQ server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+serverFqdn=$(hostname)
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/tqd ]; then
+ update_flag=1
+ update_tq
+ else
+ install_tq
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/tq ]; then
+ update_flag=1
+ update_tq client
+ else
+ install_tq client
+ fi
+else
+ echo "please input correct verType"
+fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 0c755d9f728208cbfc2302ef45d7537e437dbb5b..2b631caa69eba7ae63bccc336cb582e61925d014 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -176,6 +176,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
fi
}
+
function install_jemalloc() {
if [ "$osType" != "Darwin" ]; then
/usr/bin/install -c -d /usr/local/bin
@@ -217,6 +218,13 @@ function install_jemalloc() {
/usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
fi
}
diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c10dfec255d411965a3887942e5d2aded4635979
--- /dev/null
+++ b/packaging/tools/makearbi_tq.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Generate arbitrator's tar.gz setup package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-arbitrator-${version}"
+else
+ install_dir="${release_dir}/TQ-arbitrator-${version}"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh"
+install_files="${script_dir}/install_arbi_tq.sh"
+
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_tq.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..51fd064e1b769191c2baaf27c3a45f73a475cabd
--- /dev/null
+++ b/packaging/tools/makeclient_tq.sh
@@ -0,0 +1,203 @@
+#!/bin/bash
+#
+# Generate tar.gz package for linux client in all os system
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-client-${version}"
+else
+ install_dir="${release_dir}/TQ-client-${version}"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+# if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/tqd
+# strip ${build_dir}/bin/tq
+# bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
+# else
+# bin_files="${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${script_dir}/remove_client_tq.sh ${script_dir}/set_core.sh"
+# fi
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
+install_files="${script_dir}/install_client_tq.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f tq.tar.gz * --remove-files || :
+else
+ tar -zcv -f tq.tar.gz * || :
+ mv tq.tar.gz ..
+ rm -rf ./*
+ mv ../tq.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_tq.sh >> install_client_tq_temp.sh
+ mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_tq.sh >> install_client_tq_temp.sh
+ mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh
+fi
+chmod a+x ${install_dir}/install_client_tq.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ if [ "$osType" != "Darwin" ]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+ fi
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stable or beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 624f72278a87be1d34d64d4e8b9381cbe663bede..81061416a233ef88c376f35c928fadabe3fe8202 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -14,6 +14,7 @@ osType=$5
verMode=$6
verType=$7
pagMode=$8
+versionComp=$9
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
@@ -175,8 +176,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
-mkdir -p ${install_dir}/driver
-cp ${lib_files} ${install_dir}/driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
# Copy connector
connector_dir="${code_dir}/connector"
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index 633a135c14e908786b5a18a32077a7d987a46d4a..a2643b7486195041466d28d84d25a6b5aa05974e 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -14,6 +14,7 @@ osType=$5
verMode=$6
verType=$7
pagMode=$8
+versionComp=$9
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
@@ -32,10 +33,10 @@ fi
# Directories and files.
#if [ "$pagMode" == "lite" ]; then
-# strip ${build_dir}/bin/taosd
+# strip ${build_dir}/bin/taosd
# strip ${build_dir}/bin/taos
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
-#else
+#else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh\
# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
#fi
@@ -70,19 +71,19 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf
#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/bin
if [ "$pagMode" == "lite" ]; then
- strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
-else
+else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
- cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
- cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
@@ -99,14 +100,14 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh
mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh
-
+
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
- sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
-
+
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
@@ -149,17 +150,16 @@ sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
- sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
- cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go
fi
# Copy driver
-mkdir -p ${install_dir}/driver
-cp ${lib_files} ${install_dir}/driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
# Copy connector
connector_dir="${code_dir}/connector"
@@ -178,11 +178,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector/
-
+
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
-
+
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
-
+
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
@@ -190,7 +190,7 @@ fi
# exit 1
-cd ${release_dir}
+cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
@@ -207,8 +207,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
-elif [ "$verType" == "stable" ]; then
- pkg_name=${pkg_name}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6f897de0ce5e7287e06719562199e8ed139b02ec
--- /dev/null
+++ b/packaging/tools/makepkg_tq.sh
@@ -0,0 +1,224 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/TQ-server-${version}"
+fi
+
+# Directories and files.
+#if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/taosd
+# strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
+#else
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\
+# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
+#fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_tq.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# Init file
+#init_dir=${script_dir}/deb
+#if [ $package_type = "centos" ]; then
+# init_dir=${script_dir}/rpm
+#fi
+#init_files=${init_dir}/tqd
+# temp use rpm's tqd. TODO: later modify according to os type
+#init_file_deb=${script_dir}/../deb/tqd
+#init_file_rpm=${script_dir}/../rpm/tqd
+#init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+#init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+else
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/startPre.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+#mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/tqd.deb
+#mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/tqd.rpm
+#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh
+ mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+ sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js
+
+ sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+ sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+ sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+cd ${install_dir}
+tar -zcv -f tq.tar.gz * --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar tq.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_tq.sh >> install_tq_temp.sh
+ mv install_tq_temp.sh ${install_dir}/install_tq.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_tq.sh >> install_tq_temp.sh
+ mv install_tq_temp.sh ${install_dir}/install_tq.sh
+fi
+chmod a+x ${install_dir}/install_tq.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
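+# The vercomp.txt written here carries ${versionComp} into the package;
+# install_tq.sh reads driver/vercomp.txt in is_version_compatible before an
+# update is allowed to proceed.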
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3d99b6d41a74938d74383df3d8cdfc75c2ebb7c8
--- /dev/null
+++ b/packaging/tools/remove_arbi_tq.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall TQ's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TQ tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "TQ's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ # must manually stop tarbitrator
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}"
+echo
\ No newline at end of file
diff --git a/packaging/tools/remove_client_tq.sh b/packaging/tools/remove_client_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ad8056c18cc32623edb8b77bf6aa17070acc1cbc
--- /dev/null
+++ b/packaging/tools/remove_client_tq.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# Script to stop and uninstall the TQ client, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+log_link_dir="/usr/local/tq/log"
+cfg_link_dir="/usr/local/tq/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+ #pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$(pidof tq)" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TQ client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..211eed4dff09ab5da00d5c475cd93148b5ce1b24
--- /dev/null
+++ b/packaging/tools/remove_tq.sh
@@ -0,0 +1,227 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall TQ, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tq"
+data_link_dir="/usr/local/tq/data"
+log_link_dir="/usr/local/tq/log"
+cfg_link_dir="/usr/local/tq/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+service_config_dir="/etc/systemd/system"
+tq_service_name="tqd"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tqd() {
+ pid=$(ps -ef | grep "tqd" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqd || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ tq_service_config="${service_config_dir}/${tq_service_name}.service"
+ if systemctl is-active --quiet ${tq_service_name}; then
+ echo "TQ tqd is running, stopping it..."
+ ${csudo} systemctl stop ${tq_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tq_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tq_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TDengine tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+ if [ -d ${bin_dir}/web ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for TDengine is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tqd &> /dev/null; then
+ echo "TQ tqd is running, stopping it..."
+ ${csudo} service tqd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "TQ tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} chkconfig --del tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} insserv -r tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} update-rc.d -f tqd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tqd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ # must stop tqd manually
+ kill_tqd
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+#if echo $osinfo | grep -qwi "ubuntu" ; then
+## echo "this is ubuntu system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "debian" ; then
+## echo "this is debian system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "centos" ; then
+## echo "this is centos system"
+# ${csudo} rpm -e --noscripts tdengine || :
+#fi
+
+echo -e "${GREEN}TQ is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh
old mode 100644
new mode 100755
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index fb43751b9e8fd715d538abb1198e1bdfd0a2e9ae..6ff36c33c46a16b5b5df86f390003a7e31876bdb 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -18,6 +18,10 @@ IF (TD_LINUX)
# generate dynamic library (*.so)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt)
+ IF (TD_LINUX_64)
+ TARGET_LINK_LIBRARIES(taos lua)
+ ENDIF ()
+
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .so
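
The client library now links against liblua on 64-bit Linux builds, presumably to back the client-side scripting support. As a rough, hedged illustration of what a liblua dependency enables (not TDengine's actual integration code), a host program can embed a Lua interpreter like this; the file name and build line are assumptions:

    /* embed_lua.c -- minimal sketch of embedding Lua from C.
     * Assumes liblua 5.x is installed; build e.g.: gcc embed_lua.c -llua -lm -ldl */
    #include <stdio.h>
    #include <lua.h>
    #include <lualib.h>
    #include <lauxlib.h>

    int main(void) {
      lua_State *L = luaL_newstate();   /* create an interpreter state */
      if (L == NULL) {
        fprintf(stderr, "failed to create Lua state\n");
        return 1;
      }
      luaL_openlibs(L);                 /* load the standard libraries */
      /* run a trivial chunk; a real client would load user scripts instead */
      if (luaL_dostring(L, "print('lua is linked and working')") != 0) {
        fprintf(stderr, "lua error: %s\n", lua_tostring(L, -1));
      }
      lua_close(L);
      return 0;
    }
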
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 049d03a4389b77ef3c8f40b43fd9f1a70d0d82e6..c962a1ef4202950c11572cc183feaee2b95f3a0c 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -200,7 +200,7 @@ int32_t tscGetResRowLength(SArray* pExprList);
SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
-SExprInfo* tscExprCreate(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, int32_t colType);
void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 668a9e940657f0cde8d8406dbe4a18fdb9c72f7e..d1a325be3592a1789ac62661e61a84e6ccb969d3 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -920,7 +920,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaInfo);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
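
The rename from taosHashEmpty to taosHashClear (here and again in tscPrepare.c below) reads as a clear-but-keep-the-container operation: the hash object survives and can be reused for the next statement instead of being reallocated. A hypothetical, simplified sketch of that semantic difference, using a toy container rather than the real taosHash API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical toy container, used only to illustrate clear vs. destroy */
    typedef struct {
      int    *items;
      size_t  count;
      size_t  capacity;
    } IntBag;

    /* clear: forget the entries but keep the allocation, so the bag is reusable */
    static void intBagClear(IntBag *b) { b->count = 0; }

    /* destroy: release the memory; the bag must not be used afterwards */
    static void intBagDestroy(IntBag *b) {
      free(b->items);
      memset(b, 0, sizeof(*b));
    }

    int main(void) {
      IntBag b = { malloc(8 * sizeof(int)), 0, 8 };
      b.items[b.count++] = 42;
      intBagClear(&b);    /* analogous to taosHashClear: ready for the next batch */
      intBagDestroy(&b);  /* analogous to the teardown path */
      printf("count=%zu capacity=%zu\n", b.count, b.capacity);
      return 0;
    }
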
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index b75088ba282b29c64ae72ccc4b52928b3b9bd1ee..08d3cc599ee0d66df11da5b22ebdf3437a58295f 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1163,7 +1163,7 @@ static void insertBatchClean(STscStmt* pStmt) {
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
- taosHashEmpty(pCmd->insertParam.pTableBlockHashList);
+ taosHashClear(pCmd->insertParam.pTableBlockHashList);
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
tfree(pSql->pSubs);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 622636a9b5b6a5705f1b1cf64263fb66c34de4b8..129af8d1a31dda9f84afb452e4b4394321e8efa4 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -20,7 +20,12 @@
#define _GNU_SOURCE
#endif // __APPLE__
+#include
#include "os.h"
+#include "qPlan.h"
+#include "qSqlparser.h"
+#include "qTableMeta.h"
+#include "qUtil.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
@@ -28,14 +33,11 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
-#include "qTableMeta.h"
#include "tsclient.h"
#include "tstrbuild.h"
#include "ttoken.h"
#include "ttokendef.h"
#include "ttype.h"
-#include "qUtil.h"
-#include "qPlan.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@@ -63,8 +65,8 @@ static SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int3
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
-static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
-static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType, uint8_t precision);
+static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
+static bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
@@ -73,14 +75,13 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len);
-
static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength);
static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult);
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
int8_t type, char* fieldName, SExprInfo* pSqlExpr);
-static uint8_t convertOptr(SStrToken *pToken);
+static uint8_t convertRelationalOperator(SStrToken *pToken);
static int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery, bool timeWindowQuery);
@@ -93,7 +94,7 @@ static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCm
static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding);
-static int32_t validateStateWindowNode(SSqlCmd* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
+static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem);
@@ -119,7 +120,7 @@ static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
static int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql);
-static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg);
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
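
A recurring change in this file is that getColumnIndexByName (and, further down, doGetColumnIndexByName) no longer receives the whole SSqlCmd; callers now pass only the error-message buffer, usually tscGetErrorMsgPayload(pCmd). That keeps the column-lookup helpers decoupled from command state. A hedged, self-contained sketch of the pattern with made-up types, not the real TDengine signatures:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-in for the parser's query description */
    typedef struct { const char *names[4]; int numOfCols; } ToyQueryInfo;

    /* the helper reports failures into a caller-supplied message buffer
     * instead of reaching into a larger command object */
    static int findColumnByName(const char *name, const ToyQueryInfo *q,
                                int *index, char *msg, size_t msgLen) {
      for (int i = 0; i < q->numOfCols; ++i) {
        if (strcmp(q->names[i], name) == 0) { *index = i; return 0; }
      }
      snprintf(msg, msgLen, "invalid column name: %s", name);
      return -1;
    }

    int main(void) {
      ToyQueryInfo q = {{"ts", "current", "voltage"}, 3};
      char errbuf[64] = {0};
      int idx = -1;
      if (findColumnByName("phase", &q, &idx, errbuf, sizeof(errbuf)) != 0) {
        printf("%s\n", errbuf);  /* the caller decides how to surface the error */
      }
      return 0;
    }
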
@@ -133,6 +134,7 @@ static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSql
static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInfo);
+
static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid);
static bool validateDebugFlag(int32_t v);
static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
@@ -148,7 +150,7 @@ int16_t getNewResColId(SSqlCmd* pCmd) {
}
// serialize expr in exprlist to binary
-// formate "type | size | value"
+// format "type | size | value"
bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision) {
bool ret = false;
if (!pList || pList->size <= 0 || colType < 0) {
@@ -172,7 +174,7 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
tVariant* var = &pSub->value;
- // check all the token type in expr list same or not
+ // check all the exprToken type in expr list same or not
if (firstVarType != var->nType) {
break;
}
@@ -239,7 +241,7 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
}
-static uint8_t convertOptr(SStrToken *pToken) {
+static uint8_t convertRelationalOperator(SStrToken *pToken) {
switch (pToken->type) {
case TK_LT:
return TSDB_RELATION_LESS;
@@ -754,6 +756,8 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i);
tscTrace("%p start to parse %dth subclause, total:%"PRIzu, pSql, i, size);
+
+// normalizeSqlNode(pSqlNode); // normalize the column name in each function
if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -917,7 +921,8 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
const char* msg1 = "sliding cannot be used without interval";
- const char* msg2 = "interval cannot be less than 10 ms";
+ const char* msg2 = "interval cannot be less than 1 us";
+ const char* msg3 = "interval value is too small";
SSqlCmd* pCmd = &pSql->cmd;
@@ -944,6 +949,10 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+ if (pQueryInfo->interval.interval <= 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
// interval cannot be less than 10 milliseconds
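
The new msg3 branch rejects a zero or negative parsed interval up front, while the existing minimum-duration check still applies only to fixed-length units, since natural units ('n' for months, 'y' for years) have no fixed duration. A hedged sketch of that two-stage validation with hypothetical names and a placeholder minimum:

    #include <stdbool.h>
    #include <stdint.h>

    /* hypothetical constant; the real minimum depends on the database precision */
    #define MIN_INTERVAL 1

    static bool intervalIsValid(int64_t interval, char unit) {
      if (interval <= 0) return false;              /* new check: non-positive values */
      if (unit == 'n' || unit == 'y') return true;  /* months/years: no fixed minimum */
      return interval >= MIN_INTERVAL;              /* fixed units: enforce the minimum */
    }
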
@@ -990,7 +999,7 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -1056,7 +1065,7 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
@@ -1198,16 +1207,17 @@ int32_t tscSetTableFullName(SName* pName, SStrToken* pTableName, SSqlObj* pSql)
} else { // get current DB name first, and then set it into path
char* t = cloneCurrentDBName(pSql);
if (strlen(t) == 0) {
+ tfree(t);
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
code = tNameFromString(pName, t, T_NAME_ACCT | T_NAME_DB);
if (code != 0) {
- free(t);
+ tfree(t);
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
- free(t);
+ tfree(t);
if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -1617,8 +1627,8 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double),
getNewResColId(pCmd), sizeof(double), false);
- char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z;
- size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->token.n + 1);
+ char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->exprToken.z;
+ size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->exprToken.n + 1);
tstrncpy(pExpr->base.aliasName, name, len);
tExprNode* pNode = NULL;
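
Most of the remaining hunks in this file are mechanical follow-ups to a reshuffle of the parser's expression node: token becomes exprToken, colInfo becomes columnName, and the function-call fields (operand, the old pParam) appear to be grouped under an Expr member. A hypothetical sketch of the kind of layout these renames imply, not the actual qSqlparser.h definition:

    /* hypothetical layout inferred from the renamed accessors in this diff */
    typedef struct ToyStrToken { const char *z; unsigned n; int type; } ToyStrToken;

    typedef struct ToyExpr {
      ToyStrToken exprToken;    /* full source text of this expression node   */
      ToyStrToken columnName;   /* column reference, when tokenId is an ID    */
      int         tokenId;
      struct {
        ToyStrToken operand;    /* function name, e.g. "count"                */
        void       *paramList;  /* array of argument expressions              */
      } Expr;                   /* function-call specific fields              */
    } ToyExpr;
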
@@ -1830,7 +1840,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
- pItem->pNode->functionId = isValidFunction(pItem->pNode->operand.z, pItem->pNode->operand.n);
+ pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
if (pItem->pNode->functionId < 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
@@ -1841,10 +1851,10 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
} else if (type == SQL_NODE_TABLE_COLUMN || type == SQL_NODE_VALUE) {
// use the dynamic array list to decide if the function is valid or not
- // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
- if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
+ // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
+ if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
} else if (type == SQL_NODE_EXPR) {
int32_t code = handleArithmeticExpr(pCmd, pQueryInfo, i, pItem);
if (code != TSDB_CODE_SUCCESS) {
@@ -1863,6 +1873,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
if (!isValidDistinctSql(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
+
pQueryInfo->distinctTag = true;
}
@@ -1978,8 +1989,8 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
}
int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem) {
- const char* msg0 = "invalid column name";
const char* msg1 = "tag for normal table query is not allowed";
+ const char* msg2 = "invalid column name";
int32_t startPos = (int32_t)tscNumOfExprs(pQueryInfo);
int32_t optr = pItem->pNode->tokenId;
@@ -1988,8 +1999,8 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// all meters columns are required
@@ -2015,7 +2026,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
index.columnIndex = (pQueryInfo->udColumnId--);
index.tableIndex = 0;
- SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->token, pItem->aliasName);
+ SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->exprToken, pItem->aliasName);
SExprInfo* pExpr =
tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC, getNewResColId(pCmd));
@@ -2025,8 +2036,8 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
} else if (optr == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (getColumnIndexByName(&pItem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
@@ -2125,8 +2136,8 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
}
} else { // use the user-input result column name
- int32_t len = MIN(pItem->pNode->token.n + 1, TSDB_COL_NAME_LEN);
- tstrncpy(name, pItem->pNode->token.z, len);
+ int32_t len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN);
+ tstrncpy(name, pItem->pNode->exprToken.z, len);
}
}
@@ -2179,27 +2190,26 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
switch (functionId) {
case TSDB_FUNC_COUNT: {
- /* more than one parameter for count() function */
- if (pItem->pNode->pParam != NULL && taosArrayGetSize(pItem->pNode->pParam) != 1) {
+ /* more than one parameter for count() function */
+ if (pItem->pNode->Expr.paramList != NULL && taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SExprInfo* pExpr = NULL;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (pItem->pNode->pParam != NULL) {
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
- SStrToken* pToken = &pParamElem->pNode->colInfo;
+ if (pItem->pNode->Expr.paramList != NULL) {
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
+ SStrToken* pToken = &pParamElem->pNode->columnName;
int16_t tokenId = pParamElem->pNode->tokenId;
if ((pToken->z == NULL || pToken->n == 0) && (TK_INTEGER != tokenId)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- // select count(table.*)
- // select count(1)|count(2)
+ // select count(table.*), select count(1), count(2)
if (tokenId == TK_ALL || tokenId == TK_INTEGER) {
// check if the table name is valid or not
- SStrToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->columnName;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
@@ -2210,7 +2220,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false);
} else {
// count the number of table created according to the super table
- if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2256,6 +2266,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_SUM:
case TSDB_FUNC_AVG:
case TSDB_FUNC_RATE:
@@ -2268,22 +2279,22 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_STDDEV:
case TSDB_FUNC_LEASTSQR: {
// 1. valid the number of parameters
- int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam);
+ int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList);
// no parameters or more than one parameter for function
- if (pItem->pNode->pParam == NULL ||
+ if (pItem->pNode->Expr.paramList == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
+ if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2393,12 +2404,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_FIRST:
case TSDB_FUNC_LAST:
case TSDB_FUNC_SPREAD:
case TSDB_FUNC_LAST_ROW:
case TSDB_FUNC_INTERP: {
- bool requireAllFields = (pItem->pNode->pParam == NULL);
+ bool requireAllFields = (pItem->pNode->Expr.paramList == NULL);
// NOTE: has time range condition or normal column filter condition, the last_row query will be transferred to last query
SConvertFunc cvtFunc = {.originFuncId = functionId, .execFuncId = functionId};
@@ -2409,17 +2421,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (!requireAllFields) {
- if (taosArrayGetSize(pItem->pNode->pParam) < 1) {
+ if (taosArrayGetSize(pItem->pNode->Expr.paramList) < 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (taosArrayGetSize(pItem->pNode->pParam) > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
+ if (taosArrayGetSize(pItem->pNode->Expr.paramList) > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
/* in first/last function, multiple columns can be add to resultset */
- for (int32_t i = 0; i < taosArrayGetSize(pItem->pNode->pParam); ++i) {
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, i);
+ for (int32_t i = 0; i < taosArrayGetSize(pItem->pNode->Expr.paramList); ++i) {
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, i);
if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2427,7 +2439,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pParamElem->pNode->tokenId == TK_ALL) { // select table.*
- SStrToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->columnName;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
@@ -2448,7 +2460,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
} else {
- if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2462,16 +2474,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
char name[TSDB_COL_NAME_LEN] = {0};
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
- bool multiColOutput = taosArrayGetSize(pItem->pNode->pParam) > 1;
- setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->colInfo, multiColOutput);
+ bool multiColOutput = taosArrayGetSize(pItem->pNode->Expr.paramList) > 1;
+ setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName, multiColOutput);
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult) != 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
-
- return TSDB_CODE_SUCCESS;
+
} else { // select * from xxx
int32_t numOfFields = 0;
@@ -2499,9 +2510,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
numOfFields += tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
}
-
- return TSDB_CODE_SUCCESS;
}
+ return TSDB_CODE_SUCCESS;
}
case TSDB_FUNC_TOP:
@@ -2509,18 +2519,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_PERCT:
case TSDB_FUNC_APERCT: {
// 1. valid the number of parameters
- if (pItem->pNode->pParam == NULL || taosArrayGetSize(pItem->pNode->pParam) != 2) {
+ if (pItem->pNode->Expr.paramList == NULL || taosArrayGetSize(pItem->pNode->Expr.paramList) != 2) {
/* no parameters or more than one parameter for function */
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
if (pParamElem->pNode->tokenId != TK_ID) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2615,7 +2625,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
return TSDB_CODE_SUCCESS;
- };
+ }
case TSDB_FUNC_TID_TAG: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -2624,15 +2634,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
// no parameters or more than one parameter for function
- if (pItem->pNode->pParam == NULL || taosArrayGetSize(pItem->pNode->pParam) != 1) {
+ if (pItem->pNode->Expr.paramList == NULL || taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
tSqlExpr* pParam = pParamItem->pNode;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2687,25 +2697,25 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_BLKINFO: {
// no parameters or more than one parameter for function
- if (pItem->pNode->pParam != NULL && taosArrayGetSize(pItem->pNode->pParam) != 0) {
+ if (pItem->pNode->Expr.paramList != NULL && taosArrayGetSize(pItem->pNode->Expr.paramList) != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = {.tableIndex = 0, .columnIndex = 0,};
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY};
int32_t inter = 0;
int16_t resType = 0;
int16_t bytes = 0;
getResultDataInfo(TSDB_DATA_TYPE_INT, 4, TSDB_FUNC_BLKINFO, 0, &resType, &bytes, &inter, 0, 0);
- s.bytes = bytes;
- s.type = (uint8_t)resType;
- SExprInfo* pExpr = tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, resType,
- bytes, getNewResColId(pCmd), bytes, 0);
+ SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY, .bytes = bytes};
+
+ SExprInfo* pExpr =
+ tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, resType, bytes, getNewResColId(pCmd), bytes, 0);
tstrncpy(pExpr->base.aliasName, s.name, sizeof(pExpr->base.aliasName));
SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
@@ -2739,8 +2749,8 @@ static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t col
}
void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength) {
- int32_t len = ((int32_t)pItem->pNode->token.n < nameLength) ? (int32_t)pItem->pNode->token.n : nameLength;
- strncpy(rawName, pItem->pNode->token.z, len);
+ int32_t len = ((int32_t)pItem->pNode->exprToken.n < nameLength) ? (int32_t)pItem->pNode->exprToken.n : nameLength;
+ strncpy(rawName, pItem->pNode->exprToken.z, len);
if (pItem->aliasName != NULL) {
int32_t aliasNameLen = (int32_t) strlen(pItem->aliasName);
@@ -2782,7 +2792,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken
return columnIndex;
}
-int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) {
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
@@ -2798,7 +2808,7 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQu
if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(msg, msg0);
} else {
pIndex->tableIndex = i;
pIndex->columnIndex = colIndex;
@@ -2813,7 +2823,7 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQu
}
if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(msg, msg1);
}
}
@@ -2863,7 +2873,7 @@ int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
return TSDB_CODE_SUCCESS;
}
-int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) {
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -2874,7 +2884,7 @@ int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo*
return TSDB_CODE_TSC_INVALID_OPERATION;
}
- return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex);
+ return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex, msg);
}
int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
@@ -3234,7 +3244,7 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
SStrToken token = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&token, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -3363,7 +3373,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
// TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
} else if (pExpr->tokenId == TK_IN) {
tVariant *pVal;
- if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType, timePrecision)) {
+ if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->Expr.paramList, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
@@ -3450,7 +3460,7 @@ typedef struct SCondExpr {
static int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t timePrecision);
static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) {
- SArray* pList = pExpr->pParam;
+ SArray* pList = pExpr->Expr.paramList;
int32_t size = (int32_t) taosArrayGetSize(pList);
if (size <= 0) {
@@ -3578,7 +3588,7 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
tSqlExpr* pLeft = pTableCond->pLeft;
tSqlExpr* pRight = pTableCond->pRight;
- if (!isTablenameToken(&pLeft->colInfo)) {
+ if (!isTablenameToken(&pLeft->columnName)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3615,7 +3625,7 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq
return getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
} else { // handle leaf node
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3644,7 +3654,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
@@ -3677,7 +3687,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
int16_t leftIdx = index.tableIndex;
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pExpr->pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
@@ -3747,7 +3757,7 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3777,7 +3787,7 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
// sql function list in selection clause.
// Append the sqlExpr into exprList of pQueryInfo structure sequentially
- pExpr->functionId = isValidFunction(pExpr->operand.z, pExpr->operand.n);
+ pExpr->functionId = isValidFunction(pExpr->Expr.operand.z, pExpr->Expr.operand.n);
if (pExpr->functionId < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3796,6 +3806,7 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
uint64_t id = -1;
for(int32_t i = 0; i < inc; ++i) {
SExprInfo* p1 = tscExprGet(pQueryInfo, i + outputIndex);
+
int16_t t = p1->base.resType;
if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -3803,7 +3814,10 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
if (i == 0) {
id = p1->base.uid;
- } else if (id != p1->base.uid) {
+ continue;
+ }
+
+ if (id != p1->base.uid) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -3939,7 +3953,7 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr
SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &rightIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
@@ -4025,7 +4039,7 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
}
SSchema* pSchema = tscGetTableSchema(pTableMeta);
- if ((!isTablenameToken(&pLeft->colInfo)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) {
+ if ((!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) {
return invalidOperationMsg(msgBuf, msg2);
}
}
@@ -4049,7 +4063,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
int32_t ret = TSDB_CODE_SUCCESS;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -4092,7 +4106,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
int16_t leftIdx = index.tableIndex;
- if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -4255,7 +4269,7 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo*
tSqlExpr* pLeft = (*pExpr)->pLeft;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return;
}
@@ -4422,7 +4436,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -4505,7 +4519,7 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (left)", pQueryInfo);
}
@@ -4515,7 +4529,7 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]);
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pCondExpr->pJoinExpr->pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (right)", pQueryInfo);
}
@@ -5154,7 +5168,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
- if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -5249,7 +5263,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pItem = taosArrayGet(pSqlNode->pSortOrder, 1);
tVariant* pVar2 = &pItem->pVar;
SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
- if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -5263,7 +5277,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
} else { // meter query
- if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
@@ -5283,17 +5297,30 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
if (isTopBottomQuery(pQueryInfo)) {
- /* order of top/bottom query in interval is not valid */
- SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
- assert(pExpr->base.functionId == TSDB_FUNC_TS);
+ bool validOrder = false;
+ SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
+ if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
+ SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
+ validOrder = (pColIndex->colIndex == index.columnIndex);
+ } else {
+ /* order of top/bottom query in interval is not valid */
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ assert(pExpr->base.functionId == TSDB_FUNC_TS);
+
+ pExpr = tscExprGet(pQueryInfo, 1);
+ if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ validOrder = true;
+ }
- pExpr = tscExprGet(pQueryInfo, 1);
- if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (!validOrder) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- }
+ }
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
+
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
return TSDB_CODE_SUCCESS;
}
@@ -5358,28 +5385,29 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return code;
}
+ char* pMsg = tscGetErrorMsgPayload(pCmd);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg20);
+ return invalidOperationMsg(pMsg, msg20);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(pMsg, msg3);
}
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(pMsg, msg4);
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) &&
UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(pMsg, msg6);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
SArray* pFieldList = pAlterSQL->pAddColumns;
if (taosArrayGetSize(pFieldList) > 1) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(pMsg, msg5);
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
@@ -5390,31 +5418,31 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
if (tscGetNumOfTags(pTableMeta) == 1) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(pMsg, msg7);
}
// numOfTags == 1
if (taosArrayGetSize(pAlterSQL->varList) > 1) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ return invalidOperationMsg(pMsg, msg8);
}
tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0);
if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ return invalidOperationMsg(pMsg, msg9);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&name, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex < numOfCols) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(pMsg, msg10);
} else if (index.columnIndex == numOfCols) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
+ return invalidOperationMsg(pMsg, msg11);
}
char name1[128] = {0};
@@ -5432,24 +5460,24 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pDstItem = taosArrayGet(pAlterSQL->varList, 1);
if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ return invalidOperationMsg(pMsg, msg9);
}
if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(pMsg, msg10);
}
SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER;
SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER;
SStrToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
SStrToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg19);
+ if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex, tscGetErrorMsgPayload(pCmd)) == TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg19);
}
tVariantListItem* pItem = taosArrayGet(pVarList, 0);
@@ -5476,12 +5504,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = item->pVar.pz, .n = item->pVar.nLen};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMeta)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
+ return invalidOperationMsg(pMsg, msg12);
}
tVariantListItem* pItem = taosArrayGet(pVarList, 1);
@@ -5489,7 +5517,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg13);
+ return invalidOperationMsg(pMsg, msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
@@ -5497,14 +5525,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
varDataTLen(pAlterSQL->tagData.data) > pTagsSchema->bytes) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg14);
+ return invalidOperationMsg(pMsg, msg14);
}
int32_t schemaLen = sizeof(STColumn) * numOfTags;
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
+ tscError("0x%"PRIx64" failed to malloc for alter table pMsg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -5519,7 +5547,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pUpdateMsg->numOfTags = htons(numOfTags);
pUpdateMsg->schemaLen = htonl(schemaLen);
- // the schema is located after the msg body, then followed by true tag value
+ // the schema is located after the pMsg body, then followed by true tag value
char* d = pUpdateMsg->data;
SSchema* pTagCols = tscGetTableTagSchema(pTableMeta);
for (int i = 0; i < numOfTags; ++i) {
@@ -5532,7 +5560,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
d += sizeof(STColumn);
}
- // copy the tag value to msg body
+ // copy the tag value to pMsg body
pItem = taosArrayGet(pVarList, 1);
tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true);
@@ -5551,8 +5579,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
SArray* pFieldList = pAlterSQL->pAddColumns;
if (taosArrayGetSize(pFieldList) > 1) {
- const char* msg = "only support add one column";
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
+ const char* msgx = "only support add one column";
+ return invalidOperationMsg(pMsg, msgx);
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
@@ -5563,24 +5591,24 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
if (tscGetNumOfColumns(pTableMeta) == TSDB_MIN_COLUMNS) { //
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
+ return invalidOperationMsg(pMsg, msg15);
}
size_t size = taosArrayGetSize(pAlterSQL->varList);
if (size > 1) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+ return invalidOperationMsg(pMsg, msg16);
}
tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg18);
+ return invalidOperationMsg(pMsg, msg18);
}
char name1[TSDB_COL_NAME_LEN] = {0};
@@ -5589,79 +5617,79 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+ return invalidOperationMsg(pMsg, msg16);
}
TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+ return invalidOperationMsg(pMsg, msg21);
}
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+ return invalidOperationMsg(pMsg, msg21);
}
if (pItem->type != pColSchema->type) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+ return invalidOperationMsg(pMsg, msg23);
}
if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+ return invalidOperationMsg(pMsg, msg24);
}
if (pItem->bytes <= pColSchema->bytes) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+ return invalidOperationMsg(pMsg, msg22);
}
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
}else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+ return invalidOperationMsg(pMsg, msg16);
}
TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+ return invalidOperationMsg(pMsg, msg21);
}
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(pMsg, msg10);
}
if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+ return invalidOperationMsg(pMsg, msg21);
}
if (pItem->type != pColSchema->type) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+ return invalidOperationMsg(pMsg, msg23);
}
if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+ return invalidOperationMsg(pMsg, msg24);
}
if (pItem->bytes <= pColSchema->bytes) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+ return invalidOperationMsg(pMsg, msg22);
}
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
@@ -6515,6 +6543,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
+ const char* msg6 = "TWA/Diff/Derivative/Irate only support group by tbname";
// only retrieve tags, group by is not supported
if (tscQueryTags(pQueryInfo)) {
@@ -6566,6 +6595,19 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
}
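+ // when grouping, TWA/Diff/Derivative/Irate require tbname as the first group-by column; any additional group-by columns must be tags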
+ if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE)) {
+ for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
+ if (j == 0) {
+ if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
+ return invalidOperationMsg(msg, msg6);
+ }
+ } else if (!TSDB_COL_IS_TAG(pColIndex->flag)) {
+ return invalidOperationMsg(msg, msg6);
+ }
+ }
+ }
+
if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM &&
f != TSDB_FUNC_DIFF && f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
return invalidOperationMsg(msg, msg1);
@@ -6608,9 +6650,9 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
bool server_status = false;
tSqlExprItem* pExprItem = taosArrayGet(pExprList, 0);
tSqlExpr* pExpr = pExprItem->pNode;
- if (pExpr->operand.z == NULL) {
+ if (pExpr->Expr.operand.z == NULL) {
//handle 'select 1'
- if (pExpr->token.n == 1 && 0 == strncasecmp(pExpr->token.z, "1", 1)) {
+ if (pExpr->exprToken.n == 1 && 0 == strncasecmp(pExpr->exprToken.z, "1", 1)) {
server_status = true;
} else {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
@@ -6628,8 +6670,8 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
index = 2;
} else {
for (int32_t i = 0; i < tListLen(functionsInfo); ++i) {
- if (strncasecmp(functionsInfo[i].name, pExpr->token.z, functionsInfo[i].len) == 0 &&
- functionsInfo[i].len == pExpr->token.n) {
+ if (strncasecmp(functionsInfo[i].name, pExpr->exprToken.z, functionsInfo[i].len) == 0 &&
+ functionsInfo[i].len == pExpr->exprToken.n) {
index = i;
break;
}
@@ -6641,7 +6683,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->command = TSDB_SQL_CURRENT_DB;break;
case 1:
pQueryInfo->command = TSDB_SQL_SERV_VERSION;break;
- case 2:
+ case 2:
pQueryInfo->command = TSDB_SQL_SERV_STATUS;break;
case 3:
pQueryInfo->command = TSDB_SQL_CLI_VERSION;break;
@@ -6939,7 +6981,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (strncmp(sToken->z, pTagSchema[t].name, sToken->n) == 0 && strlen(pTagSchema[t].name) == sToken->n) {
SSchema* pSchema = &pTagSchema[t];
- char tagVal[TSDB_MAX_TAGS_LEN];
+ char tagVal[TSDB_MAX_TAGS_LEN] = {0};
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
if (pItem->pVar.nLen > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
@@ -7225,48 +7267,80 @@ int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
// TODO normalize the function expression and compare it
-int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pSqlExpr,
- SExprInfo** pExpr) {
- *pExpr = NULL;
+int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pSqlExpr, SExprInfo** pExpr) {
+ const char* msg1 = "invalid sql expression in having";
- size_t num = taosArrayGetSize(pSelectNodeList);
- for(int32_t i = 0; i < num; ++i) {
- tSqlExprItem* pItem = taosArrayGet(pSelectNodeList, i);
- if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it,
-
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- int32_t functionId = pSqlExpr->functionId;
- if (pSqlExpr->pParam == NULL) {
- index.columnIndex = 0;
- index.tableIndex = 0;
- } else {
- tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->pParam, 0);
- SStrToken* pToken = &pParamElem->pNode->colInfo;
- getColumnIndexByName(pCmd, pToken, pQueryInfo, &index);
- }
+ *pExpr = NULL;
+ size_t nx = tscNumOfExprs(pQueryInfo);
- size_t numOfNodeInSel = tscNumOfExprs(pQueryInfo);
- for(int32_t k = 0; k < numOfNodeInSel; ++k) {
- SExprInfo* pExpr1 = tscExprGet(pQueryInfo, k);
+ // a parameter list is required for every function except count()
+ if (pSqlExpr->Expr.paramList == NULL && pSqlExpr->functionId != TSDB_FUNC_COUNT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
- if (pExpr1->base.functionId != functionId) {
- continue;
- }
+ tSqlExprItem *pParam = NULL;
+ SSchema schema = {0};
- if (pExpr1->base.colInfo.colIndex != index.columnIndex) {
- continue;
- }
+ if (pSqlExpr->Expr.paramList != NULL) {
+ pParam = taosArrayGet(pSqlExpr->Expr.paramList, 0);
+ SStrToken* pToken = &pParam->pNode->columnName;
- ++pQueryInfo->havingFieldNum;
- *pExpr = pExpr1;
- break;
- }
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ } else {
+ schema = (SSchema) {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE};
+ }
- assert(*pExpr != NULL);
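+ // reuse an existing result expression with the same function id and column id for the HAVING operand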
+ for(int32_t i = 0; i < nx; ++i) {
+ SExprInfo* pExprInfo = tscExprGet(pQueryInfo, i);
+ if (pExprInfo->base.functionId == pSqlExpr->functionId && pExprInfo->base.colInfo.colId == schema.colId) {
+ ++pQueryInfo->havingFieldNum;
+ *pExpr = pExprInfo;
return TSDB_CODE_SUCCESS;
}
}
+// size_t num = taosArrayGetSize(pSelectNodeList);
+// for(int32_t i = 0; i < num; ++i) {
+// tSqlExprItem* pItem = taosArrayGet(pSelectNodeList, i);
+//
+// if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it,
+//
+// SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+// int32_t functionId = pSqlExpr->functionId;
+// if (pSqlExpr->Expr.paramList == NULL) {
+// index.columnIndex = 0;
+// index.tableIndex = 0;
+// } else {
+// tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->Expr.paramList, 0);
+// SStrToken* pToken = &pParamElem->pNode->columnName;
+// getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+// }
+//
+// size_t numOfNodeInSel = tscNumOfExprs(pQueryInfo);
+// for(int32_t k = 0; k < numOfNodeInSel; ++k) {
+// SExprInfo* pExpr1 = tscExprGet(pQueryInfo, k);
+//
+// if (pExpr1->base.functionId != functionId) {
+// continue;
+// }
+//
+// if (pExpr1->base.colInfo.colIndex != index.columnIndex) {
+// continue;
+// }
+//
+// ++pQueryInfo->havingFieldNum;
+// *pExpr = pExpr1;
+// break;
+// }
+//
+// assert(*pExpr != NULL);
+// return TSDB_CODE_SUCCESS;
+// }
+// }
+
tSqlExprItem item = {.pNode = pSqlExpr, .aliasName = NULL, .distinct = false};
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
@@ -7410,10 +7484,10 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pLeft->pParam) {
- size_t size = taosArrayGetSize(pLeft->pParam);
+ if (pLeft->Expr.paramList) {
+ size_t size = taosArrayGetSize(pLeft->Expr.paramList);
for (int32_t i = 0; i < size; i++) {
- tSqlExprItem* pParamItem = taosArrayGet(pLeft->pParam, i);
+ tSqlExprItem* pParamItem = taosArrayGet(pLeft->Expr.paramList, i);
tSqlExpr* pExpr1 = pParamItem->pNode;
if (pExpr1->tokenId != TK_ALL &&
@@ -7424,13 +7498,13 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pExpr1->tokenId == TK_ID && (pExpr1->colInfo.z == NULL && pExpr1->colInfo.n == 0)) {
+ if (pExpr1->tokenId == TK_ID && (pExpr1->columnName.z == NULL && pExpr1->columnName.n == 0)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr1->tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(pCmd, &pExpr1->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
+ if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -7445,7 +7519,7 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
}
}
- pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
+ pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n);
if (pLeft->functionId < 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -7860,14 +7934,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
int32_t timeWindowQuery =
(TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap));
+ TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY);
- if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) !=
- TSDB_CODE_SUCCESS) {
+ // parse the group by clause first
+ if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
- // parse the window_state
- if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) {
+ if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) !=
+ TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -7920,6 +7995,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
+ // parse the window_state
+ if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
if (isTimeWindowQuery(pQueryInfo)) {
// check if the first column of the nest query result is timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
@@ -7933,6 +8014,17 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
+ // parse the having clause
+ int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
+ if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if ((code = validateLimitNode(pCmd, pQueryInfo, pSqlNode, pSql)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
// set order by info
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -7944,6 +8036,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
// updateFunctionInterBuf(pQueryInfo, false);
updateLastScanOrderIfNeeded(pQueryInfo);
+
+ if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
} else {
pQueryInfo->command = TSDB_SQL_SELECT;
@@ -7989,10 +8085,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
// set order by info
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) !=
TSDB_CODE_SUCCESS) {
@@ -8159,7 +8257,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
*pExpr = calloc(1, sizeof(tExprNode));
(*pExpr)->nodeType = TSQL_NODE_COL;
(*pExpr)->pSchema = calloc(1, sizeof(SSchema));
- strncpy((*pExpr)->pSchema->name, pSqlExpr->token.z, pSqlExpr->token.n);
+ strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n);
// set the input column data byte and type.
size_t size = taosArrayGetSize(pQueryInfo->exprList);
@@ -8181,7 +8279,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}
} else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index);
+ int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -8225,7 +8323,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- if (serializeExprListToVariant(pSqlExpr->pParam, &pVal, colType, tinfo.precision) == false) {
+ if (serializeExprListToVariant(pSqlExpr->Expr.paramList, &pVal, colType, tinfo.precision) == false) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
}
*pExpr = calloc(1, sizeof(tExprNode));
@@ -8244,7 +8342,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
(*pExpr)->_node.pRight = pRight;
SStrToken t = {.type = pSqlExpr->tokenId};
- (*pExpr)->_node.optr = convertOptr(&t);
+ (*pExpr)->_node.optr = convertRelationalOperator(&t);
assert((*pExpr)->_node.optr != 0);
@@ -8283,3 +8381,39 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}
+
+#if 0
+void normalizeSqlNode(SSqlNode* pSqlNode, const char* dbName) {
+ assert(pSqlNode != NULL);
+
+ if (pSqlNode->from->type == SQL_NODE_FROM_TABLELIST) {
+// SRelElementPair *item = taosArrayGet(pSqlNode->from->list, 0);
+// item->TableName.name;
+ }
+
+ // 1. pSqlNode->pSelNodeList
+ if (pSqlNode->pSelNodeList != NULL && taosArrayGetSize(pSqlNode->pSelNodeList) > 0) {
+ SArray* pSelNodeList = pSqlNode->pSelNodeList;
+ size_t numOfExpr = taosArrayGetSize(pSelNodeList);
+ for (int32_t i = 0; i < numOfExpr; ++i) {
+ tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
+ int32_t type = pItem->pNode->type;
+ if (type == SQL_NODE_VALUE || type == SQL_NODE_EXPR) {
+ continue;
+ }
+
+ if (type == SQL_NODE_TABLE_COLUMN) {
+ }
+ }
+ }
+
+// 2. pSqlNode->pWhere
+// 3. pSqlNode->pHaving
+// 4. pSqlNode->pSortOrder
+
+// pSqlNode->from
+
+}
+
+#endif
+
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 45e552b4ffae3312e01bbd95766bb2616e96782f..c80d3960bc2a2a97d23c086284c5ba63fd52ec83 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -2309,7 +2309,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) {
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaInfo);
return 0;
}
@@ -2340,7 +2340,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
tfree(pTableMetaInfo->pTableMeta);
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaInfo);
}
return 0;
@@ -2398,11 +2398,12 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- if (pCmd->command == TSDB_SQL_RETRIEVE) {
- tscSetResRawPtr(pRes, pQueryInfo);
- } else if ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) {
- tscSetResRawPtr(pRes, pQueryInfo);
- } else if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
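+ // switch to the raw-pointer result representation for plain retrieval, for non-subquery child/normal table queries,
+ // and for non-ordered projection queries on super tables outside of join processing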
+ if ((pCmd->command == TSDB_SQL_RETRIEVE) ||
+ ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) ||
+ (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE))) {
tscSetResRawPtr(pRes, pQueryInfo);
}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 4d97fef52f956b6d550f24c1bb88a34dd64c6d13..8ab3512cba0a2bd2b220bade0830e8ea20c32f58 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2450,6 +2450,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
if (ret != 0) {
pRes->code = ret;
tscAsyncResultOnError(pSql);
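+ // release the descriptor together with the memory buffers on this error path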
+ tfree(pDesc);
tfree(pMemoryBuf);
return ret;
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index 8b1d1889a88f92145398178079333cb51142b9b6..87296c70559b8a8cf4e9d9c3ff7d6acb04da132e 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -46,6 +46,7 @@ static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
char tscLogFileName[12] = "taoslog";
+int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
static volatile int tscInitRes = 0;
@@ -134,7 +135,7 @@ void taos_init_imp(void) {
}
sprintf(temp, "%s/%s", tsLogDir, tscLogFileName);
- if (taosInitLog(temp, tsNumOfLogLines, 10) < 0) {
+ if (taosInitLog(temp, tsNumOfLogLines, tscLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 8a3e8cad32b50bd99a5668b40caea8e9faadd758..729fae3bfab6401e734f0df3d9f234d867b358a9 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -578,70 +578,72 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
- assert(pRes->numOfCols > 0);
+static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bool convertNchar) {
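+ // materialize one result column for the client: user-defined constant columns are filled from the expression
+ // parameters, and NCHAR columns are converted to the native encoding when convertNchar is true
+ // (shared by tscSetResRawPtr and tscSetResRawPtrRv)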
+ // generate the user-defined column result
+ if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) {
+ if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) {
+ setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
+ } else {
+ if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) {
+ assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes);
- int32_t offset = 0;
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
+ memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen);
+ varDataSetLen(p, pInfo->pExpr->base.param[1].nLen);
+ }
+ } else {
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
+ memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes);
+ }
+ }
+ }
- pRes->urow[i] = pRes->data + offset * pRes->numOfRows;
- pRes->length[i] = pInfo->field.bytes;
+ } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
+ // convert unicode to the native encoding in a temporary buffer, with one extra byte for the terminating symbol
+ pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
- offset += pInfo->field.bytes;
+ // zero the buffer so the binary data is string-terminated
+ memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
- // generated the user-defined column result
- if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) {
- if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) {
- setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
- } else {
- if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) {
- assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes);
+ char* p = pRes->urow[i];
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* dst = pRes->buffer[i] + k * pInfo->field.bytes;
- for (int32_t k = 0; k < pRes->numOfRows; ++k) {
- char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
+ if (isNull(p, TSDB_DATA_TYPE_NCHAR)) {
+ memcpy(dst, p, varDataTLen(p));
+ } else if (varDataLen(p) > 0) {
+ int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
+ varDataSetLen(dst, length);
- memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen);
- varDataSetLen(p, pInfo->pExpr->base.param[1].nLen);
- }
- } else {
- for (int32_t k = 0; k < pRes->numOfRows; ++k) {
- char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
- memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes);
- }
+ if (length == 0) {
+ tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
}
+ } else {
+ varDataSetLen(dst, 0);
}
- } else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
- // convert unicode to native code in a temporary buffer extra one byte for terminated symbol
- pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
-
- // string terminated char for binary data
- memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
+ p += pInfo->field.bytes;
+ }
- char* p = pRes->urow[i];
- for (int32_t k = 0; k < pRes->numOfRows; ++k) {
- char* dst = pRes->buffer[i] + k * pInfo->field.bytes;
+ memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+ }
+}
- if (isNull(p, TSDB_DATA_TYPE_NCHAR)) {
- memcpy(dst, p, varDataTLen(p));
- } else if (varDataLen(p) > 0) {
- int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
- varDataSetLen(dst, length);
+void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
+ assert(pRes->numOfCols > 0);
- if (length == 0) {
- tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
- }
- } else {
- varDataSetLen(dst, 0);
- }
+ int32_t offset = 0;
+ for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
+ SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
- p += pInfo->field.bytes;
- }
+ pRes->urow[i] = pRes->data + offset * pRes->numOfRows;
+ pRes->length[i] = pInfo->field.bytes;
- memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
- }
+ offset += pInfo->field.bytes;
+ setResRawPtrImpl(pRes, pInfo, i, true);
}
}
@@ -656,8 +658,10 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
pRes->urow[i] = pColData->pData;
pRes->length[i] = pInfo->field.bytes;
+ setResRawPtrImpl(pRes, pInfo, i, convertNchar);
+ /*
// generated the user-defined column result
- if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) {
+ if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.ColName.flag)) {
if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) {
setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
} else {
@@ -706,7 +710,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
}
memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
- }
+ }*/
}
}
@@ -989,6 +993,8 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
pInfo->block = destroyOutputBuf(pInfo->block);
pInfo->pSql = NULL;
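+ // release the column filter info owned by this operator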
+ doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols);
+
cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
tfree(pInfo->pTableQueryInfo);
}
@@ -2176,27 +2182,31 @@ void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArr
SInternalField p = {.visible = pfield->visible, .field = pfield->field};
+ bool found = false;
int32_t resColId = pfield->pExpr->base.resColId;
for(int32_t j = 0; j < numOfExpr; ++j) {
SExprInfo* pExpr = taosArrayGetP(pExprList, j);
if (pExpr->base.resColId == resColId) {
p.pExpr = pExpr;
+ found = true;
break;
}
}
-// p.pExpr = calloc(1, sizeof(SExprInfo));
-// tscExprAssign(p.pExpr, pfield->pExpr);
+ if (!found) {
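+ // no expression in pExprList carries this result column id; deep-copy the field's own expression instead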
+ assert(pfield->pExpr->pExpr != NULL);
+ p.pExpr = calloc(1, sizeof(SExprInfo));
+ tscExprAssign(p.pExpr, pfield->pExpr);
+ }
+
taosArrayPush(pFieldInfo->internalField, &p);
}
}
}
-SExprInfo* tscExprCreate(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
-
SExprInfo* pExpr = calloc(1, sizeof(SExprInfo));
if (pExpr == NULL) {
return NULL;
@@ -2218,21 +2228,22 @@ SExprInfo* tscExprCreate(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde
} else if (functionId == TSDB_FUNC_BLKINFO) {
p->colInfo.colId = pColIndex->columnIndex;
p->colBytes = TSDB_MAX_BINARY_LEN;
- p->colType = TSDB_DATA_TYPE_BINARY;
+ p->colType = TSDB_DATA_TYPE_BINARY;
} else {
+ int32_t len = tListLen(p->colInfo.name);
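+ // column names are stored qualified as "<table alias>.<column name>" via the snprintf calls below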
if (TSDB_COL_IS_TAG(colType)) {
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
p->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
p->colBytes = pSchema[pColIndex->columnIndex].bytes;
p->colType = pSchema[pColIndex->columnIndex].type;
- tstrncpy(p->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(p->colInfo.name));
+ snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema[pColIndex->columnIndex].name);
} else if (pTableMetaInfo->pTableMeta != NULL) {
// in handling select database/version/server_status(), the pTableMeta is NULL
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex);
p->colInfo.colId = pSchema->colId;
p->colBytes = pSchema->bytes;
- p->colType = pSchema->type;
- tstrncpy(p->colInfo.name, pSchema->name, sizeof(p->colInfo.name));
+ p->colType = pSchema->type;
+ snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema->name);
}
}
@@ -2257,15 +2268,17 @@ SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t function
if (index == num) {
return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}
-
- SExprInfo* pExpr = tscExprCreate(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
+
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
+ SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
return pExpr;
}
SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
- SExprInfo* pExpr = tscExprCreate(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
+ SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayPush(pQueryInfo->exprList, &pExpr);
return pExpr;
}
@@ -3044,6 +3057,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->fillType = pSrc->fillType;
pQueryInfo->fillVal = NULL;
pQueryInfo->clauseLimit = pSrc->clauseLimit;
+ pQueryInfo->prjOffset = pSrc->prjOffset;
pQueryInfo->numOfTables = 0;
pQueryInfo->window = pSrc->window;
pQueryInfo->sessionWindow = pSrc->sessionWindow;
@@ -3051,6 +3065,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->bufLen = pSrc->bufLen;
pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
+ pQueryInfo->arithmeticOnAgg = pSrc->arithmeticOnAgg;
pQueryInfo->buf = malloc(pSrc->bufLen);
if (pQueryInfo->buf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -3090,6 +3105,14 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
goto _error;
}
+ if (pQueryInfo->arithmeticOnAgg) {
+ pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
+ if (tscExprCopyAll(pQueryInfo->exprList1, pSrc->exprList1, true) != 0) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+ }
+
tscColumnListCopyAll(pQueryInfo->colList, pSrc->colList);
tscFieldInfoCopy(&pQueryInfo->fieldsInfo, &pSrc->fieldsInfo, pQueryInfo->exprList);
@@ -3414,6 +3437,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->fillType = pQueryInfo->fillType;
pNewQueryInfo->fillVal = NULL;
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
+ pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt
index 1a6c45aadef1989253c661c21a1d39f0f30fd1be..c4cd2f1dba23b31443c58054e4cfe7ce6112b0b8 100644
--- a/src/client/tests/CMakeLists.txt
+++ b/src/client/tests/CMakeLists.txt
@@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
ADD_EXECUTABLE(cliTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread)
-ENDIF()
\ No newline at end of file
+ENDIF()
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 8c970595523bbc2a8c9c0fe9edd53dd1f3499bd5..b29a535ec2c80f7fb058e3d1c55e5d16ed71c3c4 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -46,7 +46,7 @@ typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
char token[TSDB_COL_NAME_LEN]; // original token
SColIndex colInfo;
- uint64_t uid; // refactor use the pointer
+ uint64_t uid; // table uid, todo refactor use the pointer
int16_t functionId; // function id in aAgg array
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index ec98915227e34223211341971f226fda64dc0504..913b0936dbde52e5c1ef12540889f24b61fe8ca1 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -180,15 +180,15 @@ int8_t tsEnableStream = 1;
int8_t tsCompactMnodeWal = 0;
int8_t tsPrintAuth = 0;
int8_t tscEmbedded = 0;
-char configDir[TSDB_FILENAME_LEN] = {0};
-char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsMnodeTmpDir[TSDB_FILENAME_LEN] = {0};
-char tsMnodeBakDir[TSDB_FILENAME_LEN] = {0};
-char tsDataDir[TSDB_FILENAME_LEN] = {0};
-char tsScriptDir[TSDB_FILENAME_LEN] = {0};
-char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
+char configDir[PATH_MAX] = {0};
+char tsVnodeDir[PATH_MAX] = {0};
+char tsDnodeDir[PATH_MAX] = {0};
+char tsMnodeDir[PATH_MAX] = {0};
+char tsMnodeTmpDir[PATH_MAX] = {0};
+char tsMnodeBakDir[PATH_MAX] = {0};
+char tsDataDir[PATH_MAX] = {0};
+char tsScriptDir[PATH_MAX] = {0};
+char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0;
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index c1c6ffa4b343dba47728055b50639ad12cfec9fe..72e2d42ff9bb8141d6bfc11dcc13ec470f9b09e1 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -256,7 +256,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) {
return -1;
}
- int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
+ int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index 1168eeb231fec49907b7ced9f019f55af2b085a8..9e0f7ffc7411ce5e2a5a81d0e58a67e231580d00 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -407,6 +407,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
wchar_t *pWStr = calloc(1, (nLen + 1) * TSDB_NCHAR_SIZE);
bool ret = taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE, NULL);
if (!ret) {
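+ // free the temporary wide-char buffer before returning the error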
+ tfree(pWStr);
return -1;
}
@@ -606,7 +607,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
}
errno = 0; // reset global error code
- int64_t result;
+ int64_t result = 0;
switch (type) {
case TSDB_DATA_TYPE_BOOL: {
@@ -874,7 +875,8 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
free(pVariant->pz);
pVariant->dKey = v;
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
- pVariant->dKey = (double)(pVariant->i64);
+ double tmp = (double) pVariant->i64;
+ pVariant->dKey = tmp;
}
pVariant->nType = TSDB_DATA_TYPE_DOUBLE;
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index c80e1059b40b08b9eb592dc150974bef9746a1a9..2f83e5f6dc96f660162fdbda7fea034658b8cab7 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -117,7 +117,14 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
if (pMsg->pCont == NULL) return;
- if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) {
+ SRunStatus dnodeStatus = dnodeGetRunStatus();
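+ // a dnode that is shutting down replies with TSDB_CODE_DND_EXITING, while a dnode that is merely not ready yet keeps replying TSDB_CODE_APP_NOT_READY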
+ if (dnodeStatus == TSDB_RUN_STATUS_STOPPED) {
+ dError("RPC %p, shell msg:%s is ignored since dnode exiting", pMsg->handle, taosMsg[pMsg->msgType]);
+ rpcMsg.code = TSDB_CODE_DND_EXITING;
+ rpcSendResponse(&rpcMsg);
+ rpcFreeCont(pMsg->pCont);
+ return;
+ } else if (dnodeStatus != TSDB_RUN_STATUS_RUNING) {
dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
rpcMsg.code = TSDB_CODE_APP_NOT_READY;
rpcSendResponse(&rpcMsg);
diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c
index 90bae8b9dd73ca80efab8090d5a189cf8c1a8966..daf62aac94a5e10a5899ad9c8593b5ff7df86f46 100644
--- a/src/dnode/src/dnodeVMgmt.c
+++ b/src/dnode/src/dnodeVMgmt.c
@@ -170,7 +170,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg);
- void *pVnode = vnodeAcquire(pAlter->cfg.vgId);
+ void *pVnode = vnodeAcquireNotClose(pAlter->cfg.vgId);
if (pVnode != NULL) {
dDebug("vgId:%d, alter vnode msg is received", pAlter->cfg.vgId);
int32_t code = vnodeAlter(pVnode, pAlter);
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index ea738661ce2813e13468ad91b4dc1d54775db21f..41016d7b99d049922e4de7dc0cbd3dafd2bc4ebf 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -63,7 +63,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
pHead->contLen = htonl(pHead->contLen);
assert(pHead->contLen > 0);
- void *pVnode = vnodeAcquire(pHead->vgId);
+ void *pVnode = vnodeAcquireNotClose(pHead->vgId);
if (pVnode != NULL) {
code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
if (code == TSDB_CODE_SUCCESS) queuedMsgNum++;
diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c
index 26084a52eb1806c4fdce592d47471d92ec3e1cdb..bbf257ff953779fd9d097ba82e1b42c0b91d1531 100644
--- a/src/dnode/src/dnodeVWrite.c
+++ b/src/dnode/src/dnodeVWrite.c
@@ -85,7 +85,7 @@ void dnodeDispatchToVWriteQueue(SRpcMsg *pRpcMsg) {
pMsg->vgId = htonl(pMsg->vgId);
pMsg->contLen = htonl(pMsg->contLen);
- void *pVnode = vnodeAcquire(pMsg->vgId);
+ void *pVnode = vnodeAcquireNotClose(pMsg->vgId);
if (pVnode == NULL) {
code = TSDB_CODE_VND_INVALID_VGROUP_ID;
} else {
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 7b53fcc6383a6f0a1c564cf25fedb03027af8aad..6fa9a41f1f2f8d121425fad47e4e93872f7ed00f 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -81,6 +81,8 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
+#elif (_TD_TQ_ == true)
+#define TSDB_DEFAULT_PASS "tqueue"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 431c9116ccec57d90a1dbe2405845f6c26a5fef6..1e996be8896478cccf431d8ee4bf1d4f00098539 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -198,6 +198,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length")
#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress")
#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories")
+#define TSDB_CODE_DND_EXITING TAOS_DEF_ERROR_CODE(0, 0x0406) //"Dnode is exiting"
// vnode
#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 61e1e88c13fdced4c172b4c276067ff398b5f70b..f20e1535bab725532bf8785cabdbe186448acc15 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -402,7 +402,7 @@ typedef struct SColIndex {
int16_t colId; // column id
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
uint16_t flag; // denote if it is a tag or a normal column
- char name[TSDB_COL_NAME_LEN]; // TODO remove it
+ char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1];
} SColIndex;
typedef struct SColumnFilterInfo {
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 9dae862344b90580d36fc9fbba67a27cf60edc50..f31a5e36e8ba95ec12e9166471c1edd7098e58ce 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -69,6 +69,7 @@ int32_t vnodeInitMgmt();
void vnodeCleanupMgmt();
void* vnodeAcquire(int32_t vgId);
void vnodeRelease(void *pVnode);
+void* vnodeAcquireNotClose(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
void vnodeBuildStatusMsg(void *pStatus);
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index f6cb135dd11b20ff2b04b6b56e48c878b993b7e0..58f4b7ff02b673288878aa44671ba2a544556cc5 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -37,6 +37,13 @@ char PROMPT_HEADER[] = "power> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
+#elif (_TD_TQ_ == true)
+char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "tq> ";
+
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 4;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 93fac54233cf6e1c578f062552abb14bedb6e47b..6513f3e214d4c16ebf86ecdbb9ff1ca3debe0f59 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -571,6 +571,8 @@ SArguments g_args = {
"root", // user
#ifdef _TD_POWER_
"powerdb", // password
+ #elif (_TD_TQ_ == true)
+ "tqueue", // password
#else
"taosdata", // password
#endif
@@ -681,6 +683,11 @@ static void printHelp() {
"The password to use when connecting to the server. Default is 'powerdb'.");
printf("%s%s%s%s\n", indent, "-c", indent,
"Configuration directory. Default is '/etc/power/'.");
+#elif (_TD_TQ_ == true)
+ printf("%s%s%s%s\n", indent, "-P", indent,
+ "The password to use when connecting to the server. Default is 'tqueue'.");
+ printf("%s%s%s%s\n", indent, "-c", indent,
+ "Configuration directory. Default is '/etc/tq/'.");
#else
printf("%s%s%s%s\n", indent, "-P", indent,
"The password to use when connecting to the server. Default is 'taosdata'.");
@@ -4398,20 +4405,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+ // default value is -1, which means an infinite loop
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
cJSON* endAfterConsume =
cJSON_GetObjectItem(specifiedQuery, "endAfterConsume");
if (endAfterConsume
&& endAfterConsume->type == cJSON_Number) {
g_queryInfo.specifiedQueryInfo.endAfterConsume[j]
= endAfterConsume->valueint;
- } else if (!endAfterConsume) {
- // default value is -1, which mean infinite loop
- g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
}
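+ // values smaller than -1 are invalid; clamp them to -1 (infinite loop)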
+ if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
cJSON* resubAfterConsume =
cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
- g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
if ((resubAfterConsume)
&& (resubAfterConsume->type == cJSON_Number)
&& (resubAfterConsume->valueint >= 0)) {
@@ -4419,6 +4427,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
= resubAfterConsume->valueint;
}
+ if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
+
cJSON *result = cJSON_GetObjectItem(sql, "result");
if ((NULL != result) && (result->type == cJSON_String)
&& (result->valuestring != NULL)) {
@@ -4560,26 +4571,30 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
}
+ // default value is -1, which means consume without end
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
cJSON* superEndAfterConsume =
cJSON_GetObjectItem(superQuery, "endAfterConsume");
if (superEndAfterConsume
&& superEndAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.endAfterConsume =
superEndAfterConsume->valueint;
- } else if (!superEndAfterConsume) {
- // default value is -1, which mean do not resub
- g_queryInfo.superQueryInfo.endAfterConsume = -1;
}
+ if (g_queryInfo.superQueryInfo.endAfterConsume < -1)
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
+ // default value is -1, which means do not resubscribe
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
cJSON* superResubAfterConsume =
cJSON_GetObjectItem(superQuery, "resubAfterConsume");
- g_queryInfo.superQueryInfo.resubAfterConsume = -1;
if ((superResubAfterConsume)
&& (superResubAfterConsume->type == cJSON_Number)
&& (superResubAfterConsume->valueint >= 0)) {
g_queryInfo.superQueryInfo.resubAfterConsume =
superResubAfterConsume->valueint;
}
+ if (g_queryInfo.superQueryInfo.resubAfterConsume < -1)
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
// super table sqls
cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
@@ -4698,14 +4713,18 @@ PARSE_OVER:
return ret;
}
-static void prepareSampleData() {
+static int prepareSampleData() {
for (int i = 0; i < g_Dbs.dbCount; i++) {
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) {
- (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]);
+ if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) {
+ return -1;
+ }
}
}
}
+
+ return 0;
}
static void postFreeResource() {
@@ -4822,7 +4841,7 @@ static int64_t generateStbRowData(
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%"PRId64",", rand_bigint());
} else {
- errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
+ errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
}
}
@@ -4830,6 +4849,7 @@ static int64_t generateStbRowData(
dataLen -= 1;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
+ verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen);
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
return strlen(recBuf);
@@ -5082,24 +5102,25 @@ static int32_t generateStbDataTail(
} else {
tsRand = false;
}
- verbosePrint("%s() LN%d batch=%u\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n",
+ __func__, __LINE__, batch, remainderBufLen);
int32_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
- int64_t retLen = 0;
+ int64_t lenOfRow = 0;
if (tsRand) {
- retLen = generateStbRowData(superTblInfo, data,
+ lenOfRow = generateStbRowData(superTblInfo, data,
startTime + getTSRandTail(
superTblInfo->timeStampStep, k,
superTblInfo->disorderRatio,
superTblInfo->disorderRange)
);
} else {
- retLen = getRowDataFromSample(
+ lenOfRow = getRowDataFromSample(
data,
remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
startTime + superTblInfo->timeStampStep * k,
@@ -5107,14 +5128,14 @@ static int32_t generateStbDataTail(
pSamplePos);
}
- if (retLen > remainderBufLen) {
+ if (lenOfRow > remainderBufLen) {
break;
}
- pstr += snprintf(pstr , retLen + 1, "%s", data);
+ pstr += snprintf(pstr , lenOfRow + 1, "%s", data);
k++;
- len += retLen;
- remainderBufLen -= retLen;
+ len += lenOfRow;
+ remainderBufLen -= lenOfRow;
verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
@@ -5903,11 +5924,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampMs();
if (recOfBatch == 0) {
- errorPrint("[%d] %s() LN%d try inserting records of batch is %d\n",
- pThreadInfo->threadID, __func__, __LINE__,
- recOfBatch);
- errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n");
- goto free_of_interlace;
+ errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n",
+ maxSqlLen, batchPerTbl);
+ goto free_of_interlace;
}
int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
@@ -6769,7 +6793,11 @@ static int insertTestProcess() {
}
// pretreatement
- prepareSampleData();
+ if (prepareSampleData() != 0) {
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
+ return -1;
+ }
double start;
double end;
@@ -7304,7 +7332,6 @@ static void *superSubscribe(void *sarg) {
TAOS_RES* res = NULL;
uint64_t st = 0, et = 0;
-
while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
|| (g_queryInfo.superQueryInfo.endAfterConsume >
consumed[pThreadInfo->end_table_to
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 33118ce3113eb401dcd9ba143e99ef359b07f935..b03d55730941ccb106284be294fc1aa82cb417d4 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -25,17 +25,22 @@
#include "tsclient.h"
#include "tsdb.h"
#include "tutil.h"
+#include
-#define COMMAND_SIZE 65536
+#define TSDB_SUPPORT_NANOSECOND 1
+
+#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
+#define COMMAND_SIZE 65536
+#define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql"
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
-int converStringToReadable(char *str, int size, char *buf, int bufsize);
-int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-void taosDumpCharset(FILE *fp);
-void taosLoadFileCharset(FILE *fp, char *fcharset);
+static int converStringToReadable(char *str, int size, char *buf, int bufsize);
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
+static void taosDumpCharset(FILE *fp);
+static void taosLoadFileCharset(FILE *fp, char *fcharset);
typedef struct {
short bytes;
@@ -60,120 +65,120 @@ typedef struct {
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
- TSDB_SHOW_DB_NAME_INDEX,
- TSDB_SHOW_DB_CREATED_TIME_INDEX,
- TSDB_SHOW_DB_NTABLES_INDEX,
- TSDB_SHOW_DB_VGROUPS_INDEX,
- TSDB_SHOW_DB_REPLICA_INDEX,
- TSDB_SHOW_DB_QUORUM_INDEX,
- TSDB_SHOW_DB_DAYS_INDEX,
- TSDB_SHOW_DB_KEEP_INDEX,
- TSDB_SHOW_DB_CACHE_INDEX,
- TSDB_SHOW_DB_BLOCKS_INDEX,
- TSDB_SHOW_DB_MINROWS_INDEX,
- TSDB_SHOW_DB_MAXROWS_INDEX,
- TSDB_SHOW_DB_WALLEVEL_INDEX,
- TSDB_SHOW_DB_FSYNC_INDEX,
- TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_CACHELAST_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
- TSDB_SHOW_DB_UPDATE_INDEX,
- TSDB_SHOW_DB_STATUS_INDEX,
- TSDB_MAX_SHOW_DB
+ TSDB_SHOW_DB_NAME_INDEX,
+ TSDB_SHOW_DB_CREATED_TIME_INDEX,
+ TSDB_SHOW_DB_NTABLES_INDEX,
+ TSDB_SHOW_DB_VGROUPS_INDEX,
+ TSDB_SHOW_DB_REPLICA_INDEX,
+ TSDB_SHOW_DB_QUORUM_INDEX,
+ TSDB_SHOW_DB_DAYS_INDEX,
+ TSDB_SHOW_DB_KEEP_INDEX,
+ TSDB_SHOW_DB_CACHE_INDEX,
+ TSDB_SHOW_DB_BLOCKS_INDEX,
+ TSDB_SHOW_DB_MINROWS_INDEX,
+ TSDB_SHOW_DB_MAXROWS_INDEX,
+ TSDB_SHOW_DB_WALLEVEL_INDEX,
+ TSDB_SHOW_DB_FSYNC_INDEX,
+ TSDB_SHOW_DB_COMP_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_UPDATE_INDEX,
+ TSDB_SHOW_DB_STATUS_INDEX,
+ TSDB_MAX_SHOW_DB
};
// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
enum _show_tables_index {
- TSDB_SHOW_TABLES_NAME_INDEX,
- TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
- TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
- TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
- TSDB_MAX_SHOW_TABLES
+ TSDB_SHOW_TABLES_NAME_INDEX,
+ TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
+ TSDB_SHOW_TABLES_COLUMNS_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_TID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_MAX_SHOW_TABLES
};
// ---------------------------------- DESCRIBE METRIC CONFIGURE ------------------------------
enum _describe_table_index {
- TSDB_DESCRIBE_METRIC_FIELD_INDEX,
- TSDB_DESCRIBE_METRIC_TYPE_INDEX,
- TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
- TSDB_DESCRIBE_METRIC_NOTE_INDEX,
- TSDB_MAX_DESCRIBE_METRIC
+ TSDB_DESCRIBE_METRIC_FIELD_INDEX,
+ TSDB_DESCRIBE_METRIC_TYPE_INDEX,
+ TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
+ TSDB_DESCRIBE_METRIC_NOTE_INDEX,
+ TSDB_MAX_DESCRIBE_METRIC
};
#define COL_NOTE_LEN 128
typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
- char type[16];
- int length;
- char note[COL_NOTE_LEN];
+ char field[TSDB_COL_NAME_LEN + 1];
+ char type[16];
+ int length;
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- SColDes cols[];
+ char name[TSDB_TABLE_NAME_LEN];
+ SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
- char name[TSDB_DB_NAME_LEN];
- char create_time[32];
- int32_t ntables;
- int32_t vgroups;
- int16_t replica;
- int16_t quorum;
- int16_t days;
- char keeplist[32];
- //int16_t daysToKeep;
- //int16_t daysToKeep1;
- //int16_t daysToKeep2;
- int32_t cache; //MB
- int32_t blocks;
- int32_t minrows;
- int32_t maxrows;
- int8_t wallevel;
- int32_t fsync;
- int8_t comp;
- int8_t cachelast;
- char precision[8]; // time resolution
- int8_t update;
- char status[16];
+ char name[TSDB_DB_NAME_LEN];
+ char create_time[32];
+ int32_t ntables;
+ int32_t vgroups;
+ int16_t replica;
+ int16_t quorum;
+ int16_t days;
+ char keeplist[32];
+ //int16_t daysToKeep;
+ //int16_t daysToKeep1;
+ //int16_t daysToKeep2;
+ int32_t cache; //MB
+ int32_t blocks;
+ int32_t minrows;
+ int32_t maxrows;
+ int8_t wallevel;
+ int32_t fsync;
+ int8_t comp;
+ int8_t cachelast;
+ char precision[8]; // time resolution
+ int8_t update;
+ char status[16];
} SDbInfo;
typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- char metric[TSDB_TABLE_NAME_LEN];
+ char name[TSDB_TABLE_NAME_LEN];
+ char metric[TSDB_TABLE_NAME_LEN];
} STableRecord;
typedef struct {
- bool isMetric;
- STableRecord tableRecord;
+ bool isMetric;
+ STableRecord tableRecord;
} STableRecordInfo;
typedef struct {
- pthread_t threadID;
- int32_t threadIndex;
- int32_t totalThreads;
- char dbName[TSDB_DB_NAME_LEN];
- void *taosCon;
- int64_t rowsOfDumpOut;
- int64_t tablesOfDumpOut;
+ pthread_t threadID;
+ int32_t threadIndex;
+ int32_t totalThreads;
+ char dbName[TSDB_DB_NAME_LEN];
+ void *taosCon;
+ int64_t rowsOfDumpOut;
+ int64_t tablesOfDumpOut;
} SThreadParaObj;
typedef struct {
- int64_t totalRowsOfDumpOut;
- int64_t totalChildTblsOfDumpOut;
- int32_t totalSuperTblsOfDumpOut;
- int32_t totalDatabasesOfDumpOut;
+ int64_t totalRowsOfDumpOut;
+ int64_t totalChildTblsOfDumpOut;
+ int32_t totalSuperTblsOfDumpOut;
+ int32_t totalDatabasesOfDumpOut;
} resultStatistics;
-static int64_t totalDumpOutRows = 0;
+static int64_t g_totalDumpOutRows = 0;
-SDbInfo **dbInfos = NULL;
+SDbInfo **g_dbInfos = NULL;
const char *argp_program_version = version;
const char *argp_program_bug_address = "";
@@ -194,1468 +199,1655 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat
/* The options we understand. */
static struct argp_option options[] = {
- // connection option
- {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
- {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
- #ifdef _TD_POWER_
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
- #else
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
- #endif
- {"port", 'P', "PORT", 0, "Port to connect", 0},
- {"cversion", 'v', "CVERION", 0, "client version", 0},
- {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
- // input/output file
- {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
- {"inpath", 'i', "INPATH", 0, "Input file path.", 1},
- {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
- #ifdef _TD_POWER_
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
- #else
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
- #endif
- {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
- // dump unit options
- {"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'D', 0, 0, "Dump assigned databases", 2},
- // dump format options
- {"schemaonly", 's', 0, 0, "Only dump schema.", 3},
- {"without-property", 'N', 0, 0, "Dump schema without properties.", 3},
- {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
- {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
- {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
- {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
- {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
- {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
- {"debug", 'g', 0, 0, "Print debug info.", 1},
- {"verbose", 'v', 0, 0, "Print verbose debug info.", 1},
- {0}};
+ // connection option
+ {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
+ {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
+#ifdef _TD_POWER_
+ {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
+#else
+ {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
+#endif
+ {"port", 'P', "PORT", 0, "Port to connect", 0},
+ {"cversion", 'v', "CVERION", 0, "client version", 0},
+ {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
+ // input/output file
+ {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
+ {"inpath", 'i', "INPATH", 0, "Input file path.", 1},
+ {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
+#ifdef _TD_POWER_
+ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
+#else
+ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
+#endif
+ {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
+ // dump unit options
+ {"all-databases", 'A', 0, 0, "Dump all databases.", 2},
+ {"databases", 'D', 0, 0, "Dump assigned databases", 2},
+ {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2},
+ // dump format options
+ {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
+ {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
+ {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
+ {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
+ {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
+#if TSDB_SUPPORT_NANOSECOND == 1
+ {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+#else
+ {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+#endif
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
+ {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
+ {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
+ {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
+ {"debug", 'g', 0, 0, "Print debug info.", 8},
+ {"verbose", 'b', 0, 0, "Print verbose debug info.", 9},
+ {"performanceprint", 'm', 0, 0, "Print performance debug info.", 10},
+ {0}
+};
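+//
+// Illustrative invocation (example values, not defaults):
+//   taosdump -h localhost -P 6030 -o /tmp/dump -C ms -S 1609430400000 -D db1 db2
+// dumps databases db1 and db2 from the given millisecond epoch onwards into /tmp/dump.
+//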
/* Used by main to communicate with parse_opt. */
typedef struct arguments {
- // connection option
- char *host;
- char *user;
- char *password;
- uint16_t port;
- char cversion[12];
- uint16_t mysqlFlag;
- // output file
- char outpath[TSDB_FILENAME_LEN+1];
- char inpath[TSDB_FILENAME_LEN+1];
- // result file
- char *resultFile;
- char *encode;
- // dump unit option
- bool all_databases;
- bool databases;
- // dump format option
- bool schemaonly;
- bool with_property;
- int64_t start_time;
- int64_t end_time;
- int32_t data_batch;
- int32_t max_sql_len;
- int32_t table_batch; // num of table which will be dump into one output file.
- bool allow_sys;
- // other options
- int32_t thread_num;
- int abort;
- char **arg_list;
- int arg_list_len;
- bool isDumpIn;
- bool debug_print;
- bool verbose_print;
- bool performance_print;
-} SArguments;
-
-/* Parse a single option. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- /* Get the input argument from argp_parse, which we
- know is a pointer to our arguments structure. */
- struct arguments *arguments = state->input;
- wordexp_t full_path;
-
- switch (key) {
// connection option
- case 'a':
- arguments->allow_sys = true;
- break;
- case 'h':
- arguments->host = arg;
- break;
- case 'u':
- arguments->user = arg;
- break;
- case 'p':
- arguments->password = arg;
- break;
- case 'P':
- arguments->port = atoi(arg);
- break;
- case 'q':
- arguments->mysqlFlag = atoi(arg);
- break;
- case 'v':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid client vesion %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->cversion, full_path.we_wordv[0], 11);
- wordfree(&full_path);
- break;
- // output file path
- case 'o':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'g':
- arguments->debug_print = true;
- break;
- case 'i':
- arguments->isDumpIn = true;
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'r':
- arguments->resultFile = arg;
- break;
- case 'c':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid path %s\n", arg);
- return -1;
- }
- tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN);
- wordfree(&full_path);
- break;
- case 'e':
- arguments->encode = arg;
- break;
+ char *host;
+ char *user;
+ char *password;
+ uint16_t port;
+ char cversion[12];
+ uint16_t mysqlFlag;
+ // output file
+ char outpath[MAX_FILE_NAME_LEN];
+ char inpath[MAX_FILE_NAME_LEN];
+ // result file
+ char *resultFile;
+ char *encode;
// dump unit option
- case 'A':
- arguments->all_databases = true;
- break;
- case 'D':
- arguments->databases = true;
- break;
+ bool all_databases;
+ bool databases;
// dump format option
- case 's':
- arguments->schemaonly = true;
- break;
- case 'N':
- arguments->with_property = false;
- break;
- case 'S':
- // parse time here.
- arguments->start_time = atol(arg);
- break;
- case 'E':
- arguments->end_time = atol(arg);
- break;
- case 'B':
- arguments->data_batch = atoi(arg);
- if (arguments->data_batch >= INT16_MAX) {
- arguments->data_batch = INT16_MAX - 1;
- }
- break;
- case 'L':
- {
- int32_t len = atoi(arg);
- if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
- len = TSDB_MAX_ALLOWED_SQL_LEN;
- } else if (len < TSDB_MAX_SQL_LEN) {
- len = TSDB_MAX_SQL_LEN;
- }
- arguments->max_sql_len = len;
- break;
- }
- case 't':
- arguments->table_batch = atoi(arg);
- break;
- case 'T':
- arguments->thread_num = atoi(arg);
- break;
- case OPT_ABORT:
- arguments->abort = 1;
- break;
- case ARGP_KEY_ARG:
- arguments->arg_list = &state->argv[state->next - 1];
- arguments->arg_list_len = state->argc - state->next + 1;
- state->next = state->argc;
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
+ bool schemaonly;
+ bool with_property;
+ bool avro;
+ int64_t start_time;
+ int64_t end_time;
+ char precision[8];
+ int32_t data_batch;
+ int32_t max_sql_len;
+ int32_t table_batch; // num of table which will be dump into one output file.
+ bool allow_sys;
+ // other options
+ int32_t thread_num;
+ int abort;
+ char **arg_list;
+ int arg_list_len;
+ bool isDumpIn;
+ bool debug_print;
+ bool verbose_print;
+ bool performance_print;
+} SArguments;
/* Our argp parser. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state);
+
static struct argp argp = {options, parse_opt, args_doc, doc};
static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-static int taosDumpOut(struct arguments *arguments);
-static int taosDumpIn(struct arguments *arguments);
-static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp);
-static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon);
-static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName);
-static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName);
-static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName);
-static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName);
-static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
+static int taosDumpOut();
+static int taosDumpIn();
+static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
+ FILE *fp);
+static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon);
+static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon,
+ char* dbName);
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName);
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
+ int numOfCols, FILE *fp, char* dbName);
+static int32_t taosDumpTable(char *table, char *metric,
+ FILE *fp, TAOS* taosCon, char* dbName);
+static int taosDumpTableData(FILE *fp, char *tbName,
+ TAOS* taosCon, char* dbName,
+ char *jsonAvroSchema);
static int taosCheckParam(struct arguments *arguments);
static void taosFreeDbInfos();
-static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
+static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName);
struct arguments g_args = {
- // connection option
- NULL,
- "root",
- #ifdef _TD_POWER_
- "powerdb",
- #else
- "taosdata",
- #endif
- 0,
- "",
- 0,
- // outpath and inpath
- "",
- "",
- "./dump_result.txt",
- NULL,
- // dump unit option
- false,
- false,
- // dump format option
- false, // schemeonly
- true, // with_property
- 0,
- INT64_MAX,
- 1,
- TSDB_MAX_SQL_LEN,
- 1,
- false,
- // other options
- 5,
- 0,
- NULL,
- 0,
- false,
- false, // debug_print
- false, // verbose_print
- false // performance_print
+ // connection option
+ NULL,
+ "root",
+#ifdef _TD_POWER_
+ "powerdb",
+#else
+ "taosdata",
+#endif
+ 0,
+ "",
+ 0,
+ // outpath and inpath
+ "",
+ "",
+ "./dump_result.txt",
+ NULL,
+ // dump unit option
+ false,
+ false,
+ // dump format option
+ false, // schemaonly
+ true, // with_property
+ false, // avro format
+ -INT64_MAX, // start_time
+ INT64_MAX, // end_time
+ "ms", // precision
+ 1, // data_batch
+ TSDB_MAX_SQL_LEN, // max_sql_len
+ 1, // table_batch
+ false, // allow_sys
+ // other options
+ 5, // thread_num
+ 0, // abort
+ NULL, // arg_list
+ 0, // arg_list_len
+ false, // isDumpIn
+ false, // debug_print
+ false, // verbose_print
+ false // performance_print
};
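+// Note: these default values are positional initializers and must stay in
+// the same order as the fields of struct arguments (SArguments) above.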
+/* Parse a single option. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state) {
+ /* Get the input argument from argp_parse, which we
+ know is a pointer to our arguments structure. */
+ wordexp_t full_path;
+
+ switch (key) {
+ // connection option
+ case 'a':
+ g_args.allow_sys = true;
+ break;
+ case 'h':
+ g_args.host = arg;
+ break;
+ case 'u':
+ g_args.user = arg;
+ break;
+ case 'p':
+ g_args.password = arg;
+ break;
+ case 'P':
+ g_args.port = atoi(arg);
+ break;
+ case 'q':
+ g_args.mysqlFlag = atoi(arg);
+ break;
+ case 'v':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid client vesion %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.cversion, full_path.we_wordv[0], 11);
+ wordfree(&full_path);
+ break;
+ // output file path
+ case 'o':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.outpath, full_path.we_wordv[0],
+ MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'g':
+ g_args.debug_print = true;
+ break;
+ case 'i':
+ g_args.isDumpIn = true;
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(g_args.inpath, full_path.we_wordv[0],
+ MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'r':
+ g_args.resultFile = arg;
+ break;
+ case 'c':
+ if (wordexp(arg, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", arg);
+ return -1;
+ }
+ tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN);
+ wordfree(&full_path);
+ break;
+ case 'e':
+ g_args.encode = arg;
+ break;
+ // dump unit option
+ case 'A':
+ g_args.all_databases = true;
+ break;
+ case 'D':
+ g_args.databases = true;
+ break;
+ // dump format option
+ case 's':
+ g_args.schemaonly = true;
+ break;
+ case 'N':
+ g_args.with_property = false;
+ break;
+ case 'V':
+ g_args.avro = true;
+ break;
+ case 'S':
+ // parse time here.
+ g_args.start_time = atol(arg);
+ break;
+ case 'E':
+ g_args.end_time = atol(arg);
+ break;
+ case 'B':
+ g_args.data_batch = atoi(arg);
+ if (g_args.data_batch > MAX_RECORDS_PER_REQ) {
+ g_args.data_batch = MAX_RECORDS_PER_REQ;
+ }
+ break;
+ case 'L':
+ {
+ int32_t len = atoi(arg);
+ if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+ len = TSDB_MAX_ALLOWED_SQL_LEN;
+ } else if (len < TSDB_MAX_SQL_LEN) {
+ len = TSDB_MAX_SQL_LEN;
+ }
+ g_args.max_sql_len = len;
+ break;
+ }
+ case 't':
+ g_args.table_batch = atoi(arg);
+ break;
+ case 'T':
+ g_args.thread_num = atoi(arg);
+ break;
+ case OPT_ABORT:
+ g_args.abort = 1;
+ break;
+ case ARGP_KEY_ARG:
+ g_args.arg_list = &state->argv[state->next - 1];
+ g_args.arg_list_len = state->argc - state->next + 1;
+ state->next = state->argc;
+ break;
+
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
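+// queryDbImpl: run a command with up to 5 attempts, freeing any previous
+// result before each retry; report the last error if every attempt fails.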
static int queryDbImpl(TAOS *taos, char *command) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
+ int i;
+ TAOS_RES *res = NULL;
+ int32_t code = -1;
+
+ for (i = 0; i < 5; i++) {
+ if (NULL != res) {
+ taos_free_result(res);
+ res = NULL;
+ }
- for (i = 0; i < 5; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
+ res = taos_query(taos, command);
+ code = taos_errno(res);
+ if (0 == code) {
+ break;
+ }
}
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
+ if (code != 0) {
+ errorPrint("Failed to run <%s>, reason: %s\n", command, taos_errstr(res));
+ taos_free_result(res);
+ //taos_close(taos);
+ return -1;
}
- }
- if (code != 0) {
- fprintf(stderr, "Failed to run <%s>, reason: %s\n", command, taos_errstr(res));
taos_free_result(res);
- //taos_close(taos);
- return -1;
- }
+ return 0;
+}
- taos_free_result(res);
- return 0;
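+// The -C (precision) option is scanned before argp runs so that
+// parse_timestamp() below already knows which epoch unit (ms/us/ns) to use
+// when it converts the -S/-E values.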
+static void parse_precision_first(
+ int argc, char *argv[], SArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-C") == 0) {
+ if (NULL == argv[i+1]) {
+ errorPrint("%s need a valid value following!\n", argv[i]);
+ exit(-1);
+ }
+ char *tmp = strdup(argv[i+1]);
+ if (tmp == NULL) {
+ errorPrint("%s() LN%d, strdup() cannot allocate memory\n",
+ __func__, __LINE__);
+ exit(-1);
+ }
+ if ((0 != strncasecmp(tmp, "ms", strlen("ms")))
+ && (0 != strncasecmp(tmp, "us", strlen("us")))
+#if TSDB_SUPPORT_NANOSECOND == 1
+ && (0 != strncasecmp(tmp, "ns", strlen("ns")))
+#endif
+ ) {
+ errorPrint("input precision: %s is an invalid value\n", tmp);
+ free(tmp);
+ exit(-1);
+ }
+ strncpy(g_args.precision, tmp, strlen(tmp));
+ free(tmp);
+ }
+ }
}
-static void parse_args(int argc, char *argv[], SArguments *arguments) {
- for (int i = 1; i < argc; i++) {
- if ((strcmp(argv[i], "-S") == 0)
- || (strcmp(argv[i], "-E") == 0)) {
- if (argv[i+1]) {
- char *tmp = strdup(argv[++i]);
-
- if (tmp) {
- int64_t tmpEpoch;
- if (strchr(tmp, ':') && strchr(tmp, '-')) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
- fprintf(stderr, "Input end time error!\n");
- free(tmp);
- return;
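+// parse_timestamp: convert a human-readable -S/-E value to an epoch integer
+// in place (argv[i] is rewritten), so argp later only parses plain numbers.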
+static void parse_timestamp(
+ int argc, char *argv[], SArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if ((strcmp(argv[i], "-S") == 0)
+ || (strcmp(argv[i], "-E") == 0)) {
+ if (NULL == argv[i+1]) {
+ errorPrint("%s need a valid value following!\n", argv[i]);
+ exit(-1);
+ }
+ char *tmp = strdup(argv[i+1]);
+ if (NULL == tmp) {
+ errorPrint("%s() LN%d, strdup() cannot allocate memory\n",
+ __func__, __LINE__);
+ exit(-1);
}
- } else {
- tmpEpoch = atoll(tmp);
- }
- sprintf(argv[i], "%"PRId64"", tmpEpoch);
- debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
- __func__, __LINE__, tmp, i, argv[i]);
+ int64_t tmpEpoch;
+ if (strchr(tmp, ':') && strchr(tmp, '-')) {
+ int32_t timePrec;
+ if (0 == strncasecmp(arguments->precision,
+ "ms", strlen("ms"))) {
+ timePrec = TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(arguments->precision,
+ "us", strlen("us"))) {
+ timePrec = TSDB_TIME_PRECISION_MICRO;
+#if TSDB_SUPPORT_NANOSECOND == 1
+ } else if (0 == strncasecmp(arguments->precision,
+ "ns", strlen("ns"))) {
+ timePrec = TSDB_TIME_PRECISION_NANO;
+#endif
+ } else {
+ errorPrint("Invalid time precision: %s",
+ arguments->precision);
+ free(tmp);
+ return;
+ }
+
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ tmp, &tmpEpoch, strlen(tmp),
+ timePrec, 0)) {
+ errorPrint("Input %s, end time error!\n", tmp);
+ free(tmp);
+ return;
+ }
+ } else {
+ tmpEpoch = atoll(tmp);
+ }
- free(tmp);
- } else {
- errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
- exit(-1);
+ sprintf(argv[i], "%"PRId64"", tmpEpoch);
+ debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
+ __func__, __LINE__, tmp, i, argv[i]);
+ free(tmp);
}
- } else {
- errorPrint("%s need a valid value following!\n", argv[i]);
- exit(-1);
- }
- } else if (strcmp(argv[i], "-g") == 0) {
- arguments->debug_print = true;
}
- }
}
int main(int argc, char *argv[]) {
- /* Parse our arguments; every option seen by parse_opt will be
- reflected in arguments. */
- if (argc > 2)
- parse_args(argc, argv, &g_args);
+ int ret = 0;
+ /* Parse our arguments; every option seen by parse_opt will be
+ reflected in arguments. */
+ if (argc > 2) {
+ parse_precision_first(argc, argv, &g_args);
+ parse_timestamp(argc, argv, &g_args);
+ }
- argp_parse(&argp, argc, argv, 0, 0, &g_args);
+ argp_parse(&argp, argc, argv, 0, 0, &g_args);
- if (g_args.abort) {
- #ifndef _ALPINE
- error(10, 0, "ABORTED");
- #else
- abort();
- #endif
- }
+ if (g_args.abort) {
+#ifndef _ALPINE
+ error(10, 0, "ABORTED");
+#else
+ abort();
+#endif
+ }
- printf("====== arguments config ======\n");
- {
- printf("host: %s\n", g_args.host);
- printf("user: %s\n", g_args.user);
- printf("password: %s\n", g_args.password);
- printf("port: %u\n", g_args.port);
- printf("cversion: %s\n", g_args.cversion);
- printf("mysqlFlag: %d\n", g_args.mysqlFlag);
- printf("outpath: %s\n", g_args.outpath);
- printf("inpath: %s\n", g_args.inpath);
- printf("resultFile: %s\n", g_args.resultFile);
- printf("encode: %s\n", g_args.encode);
- printf("all_databases: %d\n", g_args.all_databases);
- printf("databases: %d\n", g_args.databases);
- printf("schemaonly: %d\n", g_args.schemaonly);
- printf("with_property: %d\n", g_args.with_property);
- printf("start_time: %" PRId64 "\n", g_args.start_time);
- printf("end_time: %" PRId64 "\n", g_args.end_time);
- printf("data_batch: %d\n", g_args.data_batch);
- printf("max_sql_len: %d\n", g_args.max_sql_len);
- printf("table_batch: %d\n", g_args.table_batch);
- printf("thread_num: %d\n", g_args.thread_num);
- printf("allow_sys: %d\n", g_args.allow_sys);
- printf("abort: %d\n", g_args.abort);
- printf("isDumpIn: %d\n", g_args.isDumpIn);
- printf("arg_list_len: %d\n", g_args.arg_list_len);
- printf("debug_print: %d\n", g_args.debug_print);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ printf("====== arguments config ======\n");
+ {
+ printf("host: %s\n", g_args.host);
+ printf("user: %s\n", g_args.user);
+ printf("password: %s\n", g_args.password);
+ printf("port: %u\n", g_args.port);
+ printf("cversion: %s\n", g_args.cversion);
+ printf("mysqlFlag: %d\n", g_args.mysqlFlag);
+ printf("outpath: %s\n", g_args.outpath);
+ printf("inpath: %s\n", g_args.inpath);
+ printf("resultFile: %s\n", g_args.resultFile);
+ printf("encode: %s\n", g_args.encode);
+ printf("all_databases: %s\n", g_args.all_databases?"true":"false");
+ printf("databases: %d\n", g_args.databases);
+ printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
+ printf("with_property: %s\n", g_args.with_property?"true":"false");
+ printf("avro format: %s\n", g_args.avro?"true":"false");
+ printf("start_time: %" PRId64 "\n", g_args.start_time);
+ printf("end_time: %" PRId64 "\n", g_args.end_time);
+ printf("precision: %s\n", g_args.precision);
+ printf("data_batch: %d\n", g_args.data_batch);
+ printf("max_sql_len: %d\n", g_args.max_sql_len);
+ printf("table_batch: %d\n", g_args.table_batch);
+ printf("thread_num: %d\n", g_args.thread_num);
+ printf("allow_sys: %d\n", g_args.allow_sys);
+ printf("abort: %d\n", g_args.abort);
+ printf("isDumpIn: %d\n", g_args.isDumpIn);
+ printf("arg_list_len: %d\n", g_args.arg_list_len);
+ printf("debug_print: %d\n", g_args.debug_print);
+
+ for (int32_t i = 0; i < g_args.arg_list_len; i++) {
+ printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ }
}
- }
- printf("==============================\n");
+ printf("==============================\n");
- if (g_args.cversion[0] != 0){
- tstrncpy(version, g_args.cversion, 11);
- }
+ if (g_args.cversion[0] != 0){
+ tstrncpy(version, g_args.cversion, 11);
+ }
- if (taosCheckParam(&g_args) < 0) {
- exit(EXIT_FAILURE);
- }
+ if (taosCheckParam(&g_args) < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ g_fpOfResult = fopen(g_args.resultFile, "a");
+ if (NULL == g_fpOfResult) {
+ errorPrint("Failed to open %s for save result\n", g_args.resultFile);
+ exit(-1);
+ };
- g_fpOfResult = fopen(g_args.resultFile, "a");
- if (NULL == g_fpOfResult) {
- fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
- return 1;
- };
-
- fprintf(g_fpOfResult, "#############################################################################\n");
- fprintf(g_fpOfResult, "============================== arguments config =============================\n");
- {
- fprintf(g_fpOfResult, "host: %s\n", g_args.host);
- fprintf(g_fpOfResult, "user: %s\n", g_args.user);
- fprintf(g_fpOfResult, "password: %s\n", g_args.password);
- fprintf(g_fpOfResult, "port: %u\n", g_args.port);
- fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
- fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
- fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
- fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
- fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
- fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
- fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
- fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
- fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
- fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
- fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
- fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
- fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
- fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
- fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
- fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
- fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
- fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
- fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
- fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ fprintf(g_fpOfResult, "#############################################################################\n");
+ fprintf(g_fpOfResult, "============================== arguments config =============================\n");
+ {
+ fprintf(g_fpOfResult, "host: %s\n", g_args.host);
+ fprintf(g_fpOfResult, "user: %s\n", g_args.user);
+ fprintf(g_fpOfResult, "password: %s\n", g_args.password);
+ fprintf(g_fpOfResult, "port: %u\n", g_args.port);
+ fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
+ fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
+ fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
+ fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
+ fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
+ fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
+ fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false");
+ fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
+ fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
+ fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
+ fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
+ fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
+ fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
+ fprintf(g_fpOfResult, "precision: %s\n", g_args.precision);
+ fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
+ fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
+ fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
+ fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
+ fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
+ fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
+ fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
+ fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
+
+ for (int32_t i = 0; i < g_args.arg_list_len; i++) {
+ fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
+ }
}
- }
- g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
-
- time_t tTime = time(NULL);
- struct tm tm = *localtime(&tTime);
-
- if (g_args.isDumpIn) {
- fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
- fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (taosDumpIn(&g_args) < 0) {
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
- return -1;
- }
- } else {
- fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
- fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (taosDumpOut(&g_args) < 0) {
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
- return -1;
- }
-
- fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
- fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut);
- fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut);
- }
+ g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
+ time_t tTime = time(NULL);
+ struct tm tm = *localtime(&tTime);
- return 0;
+ if (g_args.isDumpIn) {
+ fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
+ fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (taosDumpIn() < 0) {
+ ret = -1;
+ }
+ } else {
+ fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
+ fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (taosDumpOut() < 0) {
+ ret = -1;
+ } else {
+ fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
+ fprintf(g_fpOfResult, "# total database count: %d\n",
+ g_resultStatistics.totalDatabasesOfDumpOut);
+ fprintf(g_fpOfResult, "# total super table count: %d\n",
+ g_resultStatistics.totalSuperTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n",
+ g_resultStatistics.totalChildTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# total row count: %"PRId64"\n",
+ g_resultStatistics.totalRowsOfDumpOut);
+ }
+ }
+
+ fprintf(g_fpOfResult, "\n");
+ fclose(g_fpOfResult);
+
+ return ret;
}
-void taosFreeDbInfos() {
- if (dbInfos == NULL) return;
- for (int i = 0; i < 128; i++) tfree(dbInfos[i]);
- tfree(dbInfos);
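+// g_dbInfos holds up to 128 SDbInfo pointers allocated in taosDumpOut();
+// free every entry first, then the pointer array itself.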
+static void taosFreeDbInfos() {
+ if (g_dbInfos == NULL) return;
+ for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]);
+ tfree(g_dbInfos);
}
// check table is normal table or super table
-int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) {
- TAOS_ROW row = NULL;
- bool isSet = false;
- TAOS_RES *result = NULL;
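+// Decide whether 'table' is a normal table or a super table (metric):
+// try "show tables like ..." first, then fall back to "show stables like ...";
+// return -1 if neither query finds a match.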
+static int taosGetTableRecordInfo(
+ char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) {
+ TAOS_ROW row = NULL;
+ bool isSet = false;
+ TAOS_RES *result = NULL;
+
+ memset(pTableRecordInfo, 0, sizeof(STableRecordInfo));
+
+ char* tempCommand = (char *)malloc(COMMAND_SIZE);
+ if (tempCommand == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- memset(pTableRecordInfo, 0, sizeof(STableRecordInfo));
+ sprintf(tempCommand, "show tables like %s", table);
- char* tempCommand = (char *)malloc(COMMAND_SIZE);
- if (tempCommand == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+ result = taos_query(taosCon, tempCommand);
+ int32_t code = taos_errno(result);
- sprintf(tempCommand, "show tables like %s", table);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tempCommand);
+ free(tempCommand);
+ taos_free_result(result);
+ return -1;
+ }
- result = taos_query(taosCon, tempCommand);
- int32_t code = taos_errno(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+
+ while ((row = taos_fetch_row(result)) != NULL) {
+ isSet = true;
+ pTableRecordInfo->isMetric = false;
+ strncpy(pTableRecordInfo->tableRecord.name,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ strncpy(pTableRecordInfo->tableRecord.metric,
+ (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+ break;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tempCommand);
- free(tempCommand);
taos_free_result(result);
- return -1;
- }
+ result = NULL;
- TAOS_FIELD *fields = taos_fetch_fields(result);
-
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isMetric = false;
- strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
- break;
- }
+ if (isSet) {
+ free(tempCommand);
+ return 0;
+ }
- taos_free_result(result);
- result = NULL;
+ sprintf(tempCommand, "show stables like %s", table);
- if (isSet) {
- free(tempCommand);
- return 0;
- }
+ result = taos_query(taosCon, tempCommand);
+ code = taos_errno(result);
- sprintf(tempCommand, "show stables like %s", table);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tempCommand);
+ free(tempCommand);
+ taos_free_result(result);
+ return -1;
+ }
- result = taos_query(taosCon, tempCommand);
- code = taos_errno(result);
+ while ((row = taos_fetch_row(result)) != NULL) {
+ isSet = true;
+ pTableRecordInfo->isMetric = true;
+ tstrncpy(pTableRecordInfo->tableRecord.metric, table,
+ TSDB_TABLE_NAME_LEN);
+ break;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tempCommand);
- free(tempCommand);
taos_free_result(result);
- return -1;
- }
-
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isMetric = true;
- tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN);
- break;
- }
-
- taos_free_result(result);
- result = NULL;
+ result = NULL;
- if (isSet) {
+ if (isSet) {
+ free(tempCommand);
+ return 0;
+ }
+ errorPrint("%s() LN%d, invalid table/metric %s\n",
+ __func__, __LINE__, table);
free(tempCommand);
- return 0;
- }
- fprintf(stderr, "invalid table/metric %s\n", table);
- free(tempCommand);
- return -1;
+ return -1;
}
-int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) {
- STableRecord tableRecord;
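+// Append one STableRecord (table name plus its metric, if any) to the shared
+// temp file .tables.tmp.0, creating the file on first use; the dump-out work
+// threads consume these temp files later.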
+static int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter,
+ char* metric, int* fd) {
+ STableRecord tableRecord;
- if (-1 == *fd) {
- *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (*fd == -1) {
- fprintf(stderr, "failed to open temp file: .tables.tmp.0\n");
- return -1;
+ if (-1 == *fd) {
+ *fd = open(".tables.tmp.0",
+ O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (*fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: .tables.tmp.0\n",
+ __func__, __LINE__);
+ return -1;
+ }
}
- }
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
- tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
+ tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
- taosWrite(*fd, &tableRecord, sizeof(STableRecord));
- return 0;
+ taosWrite(*fd, &tableRecord, sizeof(STableRecord));
+ return 0;
}
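+// Collect all child-table names of a super table via "select tbname from <metric>",
+// spool them to .select-tbname.tmp, then split the records into per-thread
+// files .tables.tmp.<n> (tableOfPerFile records each) for the dump-out threads.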
+static int32_t taosSaveTableOfMetricToTempFile(
+ TAOS *taosCon, char* metric,
+ int32_t* totalNumOfThread) {
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
-int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) {
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
-
- char* tmpCommand = (char *)malloc(COMMAND_SIZE);
- if (tmpCommand == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+ char* tmpCommand = (char *)malloc(COMMAND_SIZE);
+ if (tmpCommand == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ return -1;
+ }
- sprintf(tmpCommand, "select tbname from %s", metric);
+ sprintf(tmpCommand, "select tbname from %s", metric);
- TAOS_RES *res = taos_query(taosCon, tmpCommand);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command %s\n", tmpCommand);
+ TAOS_RES *res = taos_query(taosCon, tmpCommand);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, tmpCommand);
+ free(tmpCommand);
+ taos_free_result(res);
+ return -1;
+ }
free(tmpCommand);
- taos_free_result(res);
- return -1;
- }
- free(tmpCommand);
-
- char tmpBuf[TSDB_FILENAME_LEN + 1];
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".select-tbname.tmp");
- fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- taos_free_result(res);
- return -1;
- }
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ char tmpBuf[MAX_FILE_NAME_LEN];
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ taos_free_result(res);
+ return -1;
+ }
- int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
- tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+ int32_t numOfTable = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
- numOfTable++;
- }
- taos_free_result(res);
- lseek(fd, 0, SEEK_SET);
-
- int maxThreads = arguments->thread_num;
- int tableOfPerFile ;
- if (numOfTable <= arguments->thread_num) {
- tableOfPerFile = 1;
- maxThreads = numOfTable;
- } else {
- tableOfPerFile = numOfTable / arguments->thread_num;
- if (0 != numOfTable % arguments->thread_num) {
- tableOfPerFile += 1;
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
+ tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ numOfTable++;
}
- }
+ taos_free_result(res);
+ lseek(fd, 0, SEEK_SET);
- char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
- if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
- close(fd);
- return -1;
- }
+ int maxThreads = g_args.thread_num;
+ int tableOfPerFile;
+ if (numOfTable <= g_args.thread_num) {
+ tableOfPerFile = 1;
+ maxThreads = numOfTable;
+ } else {
+ tableOfPerFile = numOfTable / g_args.thread_num;
+ if (0 != numOfTable % g_args.thread_num) {
+ tableOfPerFile += 1;
+ }
+ }
- int32_t numOfThread = *totalNumOfThread;
- int subFd = -1;
- for (; numOfThread < maxThreads; numOfThread++) {
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
- subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (subFd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
- (void)remove(tmpBuf);
- }
- sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
- free(tblBuf);
- close(fd);
- return -1;
+ char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
+ if (NULL == tblBuf){
+ errorPrint("%s() LN%d, failed to calloc %" PRIzu "\n",
+ __func__, __LINE__, tableOfPerFile * sizeof(STableRecord));
+ close(fd);
+ return -1;
}
- // read tableOfPerFile for fd, write to subFd
- ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
- if (readLen <= 0) {
- close(subFd);
- break;
+ int32_t numOfThread = *totalNumOfThread;
+ int subFd = -1;
+ for (; numOfThread < maxThreads; numOfThread++) {
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
+ subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (subFd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ (void)remove(tmpBuf);
+ free(tblBuf);
+ close(fd);
+ return -1;
+ }
+
+ // read tableOfPerFile for fd, write to subFd
+ ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
+ if (readLen <= 0) {
+ close(subFd);
+ break;
+ }
+ taosWrite(subFd, tblBuf, readLen);
+ close(subFd);
}
- taosWrite(subFd, tblBuf, readLen);
- close(subFd);
- }
- sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ sprintf(tmpBuf, ".select-tbname.tmp");
+ (void)remove(tmpBuf);
- if (fd >= 0) {
- close(fd);
- fd = -1;
- }
+ if (fd >= 0) {
+ close(fd);
+ fd = -1;
+ }
- *totalNumOfThread = numOfThread;
+ *totalNumOfThread = numOfThread;
- free(tblBuf);
- return 0;
+ free(tblBuf);
+ return 0;
}
-int taosDumpOut(struct arguments *arguments) {
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
- char *command = NULL;
-
- TAOS_ROW row;
- FILE *fp = NULL;
- int32_t count = 0;
- STableRecordInfo tableRecordInfo;
-
- char tmpBuf[TSDB_FILENAME_LEN+9] = {0};
- if (arguments->outpath[0] != 0) {
- sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath);
- } else {
- sprintf(tmpBuf, "dbs.sql");
- }
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- return -1;
- }
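+// taosDumpOut drives the dump-out path: write the schema file dbs.sql,
+// select the matching databases from "show databases", then either dump
+// whole databases or only the tables named on the command line, farming the
+// per-table data dump out to worker threads.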
+static int taosDumpOut() {
+ TAOS *taos = NULL;
+ TAOS_RES *result = NULL;
+ char *command = NULL;
- dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *));
- if (dbInfos == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
+ TAOS_ROW row;
+ FILE *fp = NULL;
+ int32_t count = 0;
+ STableRecordInfo tableRecordInfo;
- command = (char *)malloc(COMMAND_SIZE);
- if (command == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
+ char tmpBuf[4096] = {0};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
+ } else {
+ sprintf(tmpBuf, "dbs.sql");
+ }
- /* Connect to server */
- taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port);
- if (taos == NULL) {
- fprintf(stderr, "failed to connect to TDengine server\n");
- goto _exit_failure;
- }
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
- /* --------------------------------- Main Code -------------------------------- */
- /* if (arguments->databases || arguments->all_databases) { // dump part of databases or all databases */
- /* */
- taosDumpCharset(fp);
+ g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *));
+ if (g_dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ goto _exit_failure;
+ }
- sprintf(command, "show databases");
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
+ command = (char *)malloc(COMMAND_SIZE);
+ if (command == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ goto _exit_failure;
+ }
- if (code != 0) {
- fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
- goto _exit_failure;
- }
+ /* Connect to server */
+ taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ goto _exit_failure;
+ }
- TAOS_FIELD *fields = taos_fetch_fields(result);
+ /* --------------------------------- Main Code -------------------------------- */
+ /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
+ /* */
+ taosDumpCharset(fp);
- while ((row = taos_fetch_row(result)) != NULL) {
- // sys database name : 'log', but subsequent version changed to 'log'
- if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 &&
- (!arguments->allow_sys))
- continue;
+ sprintf(command, "show databases");
+ result = taos_query(taos, command);
+ int32_t code = taos_errno(result);
- if (arguments->databases) { // input multi dbs
- for (int i = 0; arguments->arg_list[i]; i++) {
- if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
- }
- continue;
- } else if (!arguments->all_databases) { // only input one db
- if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
- else
- continue;
- }
-
- _dump_db_point:
-
- dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (dbInfos[count] == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- goto _exit_failure;
- }
-
- strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
- if (arguments->with_property) {
- dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
- //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
- //dbInfos[count]->daysToKeep1;
- //dbInfos[count]->daysToKeep2;
- dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
- //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
- dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- }
- count++;
-
- if (arguments->databases) {
- if (count > arguments->arg_list_len) break;
-
- } else if (!arguments->all_databases) {
- if (count >= 1) break;
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(result));
+ goto _exit_failure;
}
- }
- if (count == 0) {
- fprintf(stderr, "No databases valid to dump\n");
- goto _exit_failure;
- }
+ TAOS_FIELD *fields = taos_fetch_fields(result);
- if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases
- for (int i = 0; i < count; i++) {
- taosDumpDb(dbInfos[i], arguments, fp, taos);
- }
- } else {
- if (arguments->arg_list_len == 1) { // case: taosdump
- taosDumpDb(dbInfos[0], arguments, fp, taos);
- } else { // case: taosdump tablex tabley ...
- taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp);
- fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfos[0]->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+ while ((row = taos_fetch_row(result)) != NULL) {
+ // skip the system database 'log' unless --allow-sys (-a) is given
+ if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ && (!g_args.allow_sys)) {
+ continue;
+ }
- sprintf(command, "use %s", dbInfos[0]->name);
+ if (g_args.databases) { // input multi dbs
+ for (int i = 0; g_args.arg_list[i]; i++) {
+ if (strncasecmp(g_args.arg_list[i],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ goto _dump_db_point;
+ }
+ continue;
+ } else if (!g_args.all_databases) { // only input one db
+ if (strncasecmp(g_args.arg_list[0],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ goto _dump_db_point;
+ else
+ continue;
+ }
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
- goto _exit_failure;
- }
+_dump_db_point:
- fprintf(fp, "USE %s;\n\n", dbInfos[0]->name);
+ g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (g_dbInfos[count] == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
+ goto _exit_failure;
+ }
- int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0
- int normalTblFd = -1;
- int32_t retCode;
- int superTblCnt = 0 ;
- for (int i = 1; arguments->arg_list[i]; i++) {
- if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) {
- fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]);
- continue;
+ strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ if (g_args.with_property) {
+ g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ g_dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
+ //g_dbInfos[count]->daysToKeep1;
+ //g_dbInfos[count]->daysToKeep2;
+ g_dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ g_dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ g_dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ g_dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ g_dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ g_dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
+ g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
+ count++;
- if (tableRecordInfo.isMetric) { // dump all table of this metric
- int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
- if (0 == ret) {
- superTblCnt++;
- }
- retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread);
- } else {
- if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric
- int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
- if (0 == ret) {
- superTblCnt++;
- }
- }
- retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
+ if (g_args.databases) {
+ if (count > g_args.arg_list_len) break;
+
+ } else if (!g_args.all_databases) {
+ if (count >= 1) break;
}
+ }
+
+ if (count == 0) {
+ errorPrint("%d databases valid to dump\n", count);
+ goto _exit_failure;
+ }
- if (retCode < 0) {
- if (-1 != normalTblFd){
- taosClose(normalTblFd);
- }
- goto _clean_tmp_file;
+ if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases
+ for (int i = 0; i < count; i++) {
+ taosDumpDb(g_dbInfos[i], fp, taos);
}
- }
+ } else {
+ if (g_args.arg_list_len == 1) { // case: taosdump
+ taosDumpDb(g_dbInfos[0], fp, taos);
+ } else { // case: taosdump tablex tabley ...
+ taosDumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ g_dbInfos[0]->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
+
+ sprintf(command, "use %s", g_dbInfos[0]->name);
+
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("invalid database %s\n", g_dbInfos[0]->name);
+ goto _exit_failure;
+ }
- // TODO: save dump super table into result_output.txt
- fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+ fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name);
+
+ int32_t totalNumOfThread = 1; // 0: all normal tables go into .tables.tmp.0
+ int normalTblFd = -1;
+ int32_t retCode;
+ int superTblCnt = 0;
+ for (int i = 1; g_args.arg_list[i]; i++) {
+ if (taosGetTableRecordInfo(g_args.arg_list[i],
+ &tableRecordInfo, taos) < 0) {
+ errorPrint("input the invalide table %s\n",
+ g_args.arg_list[i]);
+ continue;
+ }
+
+ if (tableRecordInfo.isMetric) { // dump all table of this metric
+ int ret = taosDumpStable(
+ tableRecordInfo.tableRecord.metric,
+ fp, taos, g_dbInfos[0]->name);
+ if (0 == ret) {
+ superTblCnt++;
+ }
+ retCode = taosSaveTableOfMetricToTempFile(
+ taos, tableRecordInfo.tableRecord.metric,
+ &totalNumOfThread);
+ } else {
+ if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub-table and its metric
+ int ret = taosDumpStable(
+ tableRecordInfo.tableRecord.metric,
+ fp, taos, g_dbInfos[0]->name);
+ if (0 == ret) {
+ superTblCnt++;
+ }
+ }
+ retCode = taosSaveAllNormalTableToTempFile(
+ taos, tableRecordInfo.tableRecord.name,
+ tableRecordInfo.tableRecord.metric, &normalTblFd);
+ }
+
+ if (retCode < 0) {
+ if (-1 != normalTblFd){
+ taosClose(normalTblFd);
+ }
+ goto _clean_tmp_file;
+ }
+ }
- if (-1 != normalTblFd){
- taosClose(normalTblFd);
- }
+ // TODO: save dump super table into result_output.txt
+ fprintf(g_fpOfResult, "# super table counter: %d\n",
+ superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- // start multi threads to dumpout
- taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name);
+ if (-1 != normalTblFd){
+ taosClose(normalTblFd);
+ }
- char tmpFileName[TSDB_FILENAME_LEN + 1];
- _clean_tmp_file:
- for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) {
- sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
- remove(tmpFileName);
- }
+ // start multi threads to dumpout
+ taosStartDumpOutWorkThreads(totalNumOfThread,
+ g_dbInfos[0]->name);
+
+ char tmpFileName[MAX_FILE_NAME_LEN];
+_clean_tmp_file:
+ for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) {
+ sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
+ remove(tmpFileName);
+ }
+ }
}
- }
- /* Close the handle and return */
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- tfree(command);
- taosFreeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
- return 0;
+ /* Close the handle and return */
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ tfree(command);
+ taosFreeDbInfos();
+ fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return 0;
_exit_failure:
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- tfree(command);
- taosFreeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
- return -1;
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ tfree(command);
+ taosFreeDbInfos();
+ errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return -1;
}
-int taosGetTableDes(
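+// Fill tableDes from "describe <db>.<table>". For a child table, additionally
+// query each TAG column ("select <tag> from <db>.<table>") and store the tag
+// value (quoted for BINARY/NCHAR) in the column's note field.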
+static int taosGetTableDes(
char* dbName, char *table,
STableDef *tableDes, TAOS* taosCon, bool isSuperTable) {
- TAOS_ROW row = NULL;
- TAOS_RES* res = NULL;
- int count = 0;
+ TAOS_ROW row = NULL;
+ TAOS_RES* res = NULL;
+ int count = 0;
- char sqlstr[COMMAND_SIZE];
- sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ char sqlstr[COMMAND_SIZE];
+ sprintf(sqlstr, "describe %s.%s;", dbName, table);
- tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
- while ((row = taos_fetch_row(res)) != NULL) {
- strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
- tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
-
- count++;
- }
+ res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- taos_free_result(res);
- res = NULL;
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
+ while ((row = taos_fetch_row(res)) != NULL) {
+ strncpy(tableDes->cols[count].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ strncpy(tableDes->cols[count].type,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
+ tableDes->cols[count].length =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ strncpy(tableDes->cols[count].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+
+ count++;
+ }
- if (isSuperTable) {
- return count;
- }
+ taos_free_result(res);
+ res = NULL;
- // if chidl-table have tag, using select tagName from table to get tagValue
- for (int i = 0 ; i < count; i++) {
- if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
+ if (isSuperTable) {
+ return count;
+ }
+ // if the child table has tags, use "select tagName from table" to get the tag values
+ for (int i = 0 ; i < count; i++) {
+ if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
- sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
- res = taos_query(taosCon, sqlstr);
- code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
+ sprintf(sqlstr, "select %s from %s.%s",
+ tableDes->cols[i].field, dbName, table);
- fields = taos_fetch_fields(res);
+ res = taos_query(taosCon, sqlstr);
+ code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- row = taos_fetch_row(res);
- if (NULL == row) {
- fprintf(stderr, " fetch failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
+ fields = taos_fetch_fields(res);
- if (row[0] == NULL) {
- sprintf(tableDes->cols[i].note, "%s", "NULL");
- taos_free_result(res);
- res = NULL;
- continue;
- }
+ row = taos_fetch_row(res);
+ if (NULL == row) {
+ errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- int32_t* length = taos_fetch_lengths(res);
+ if (row[0] == NULL) {
+ sprintf(tableDes->cols[i].note, "%s", "NULL");
+ taos_free_result(res);
+ res = NULL;
+ continue;
+ }
- //int32_t* length = taos_fetch_lengths(tmpResult);
- switch (fields[0].type) {
- case TSDB_DATA_TYPE_BOOL:
- sprintf(tableDes->cols[i].note, "%d", ((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_INT:
- sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
- break;
- case TSDB_DATA_TYPE_BINARY: {
- memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- tableDes->cols[i].note[0] = '\'';
- char tbuf[COL_NOTE_LEN];
- converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
- char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
- *(pstr++) = '\'';
- break;
- }
- case TSDB_DATA_TYPE_NCHAR: {
- memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
- convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
- sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
- #if 0
- if (!arguments->mysqlFlag) {
- sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[0]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000));
+ int32_t* length = taos_fetch_lengths(res);
+
+ //int32_t* length = taos_fetch_lengths(tmpResult);
+ switch (fields[0].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ sprintf(tableDes->cols[i].note, "%d",
+ ((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
+ tableDes->cols[i].note[0] = '\'';
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
+ char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
+ *(pstr++) = '\'';
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
+ char tbuf[COL_NOTE_LEN-2]; // need to reserve 2 bytes for the enclosing quotes
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
+ sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
+#if 0
+ if (!g_args.mysqlFlag) {
+ sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[0]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000));
+ }
+#endif
+ break;
+ default:
+ break;
}
- #endif
- break;
- default:
- break;
+
+ taos_free_result(res);
+ res = NULL;
}
- taos_free_result(res);
- res = NULL;
- }
+ return count;
+}
- return count;
+static int convertSchemaToAvroSchema(STableDef *tableDes, char **avroSchema)
+{
+ errorPrint("%s() LN%d TODO: covert table schema to avro schema\n",
+ __func__, __LINE__);
+ return 0;
}
-int32_t taosDumpTable(
- char *table, char *metric, struct arguments *arguments,
+static int32_t taosDumpTable(
+ char *table, char *metric,
FILE *fp, TAOS* taosCon, char* dbName) {
- int count = 0;
+ int count = 0;
- STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef)
+ + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table
- /*
- count = taosGetTableDes(metric, tableDes, taosCon);
+ if (metric != NULL && metric[0] != '\0') { // dump the schema of a child table created from a super table
+ /*
+ count = taosGetTableDes(metric, tableDes, taosCon);
- if (count < 0) {
- free(tableDes);
- return -1;
- }
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
- taosDumpCreateTableClause(tableDes, count, fp);
+ taosDumpCreateTableClause(tableDes, count, fp);
- memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- */
+ memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ */
- count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
+ count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
- if (count < 0) {
- free(tableDes);
- return -1;
- }
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
- // create child-table using super-table
- taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName);
+ // create child-table using super-table
+ taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName);
- } else { // dump table definition
- count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
+ } else { // dump table definition
+ count = taosGetTableDes(dbName, table, tableDes, taosCon, false);
- if (count < 0) {
- free(tableDes);
- return -1;
+ if (count < 0) {
+ free(tableDes);
+ return -1;
+ }
+
+ // create normal-table or super-table
+ taosDumpCreateTableClause(tableDes, count, fp, dbName);
}
- // create normal-table or super-table
- taosDumpCreateTableClause(tableDes, count, fp, dbName);
- }
+ char *jsonAvroSchema = NULL;
+ if (g_args.avro) {
+ convertSchemaToAvroSchema(tableDes, &jsonAvroSchema);
+ }
+
+ free(tableDes);
- free(tableDes);
+ int32_t ret = 0;
+ if (!g_args.schemaonly) {
+ ret = taosDumpTableData(fp, table, taosCon, dbName,
+ jsonAvroSchema);
+ }
- return taosDumpTableData(fp, table, arguments, taosCon, dbName);
+ return ret;
}
-void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
-
- char *pstr = sqlstr;
- pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
- if (isDumpProperty) {
- pstr += sprintf(pstr,
- "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
- dbInfo->comp, dbInfo->precision, dbInfo->update);
- }
+static void taosDumpCreateDbClause(
+ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+
+ char *pstr = sqlstr;
+ pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
+ if (isDumpProperty) {
+ pstr += sprintf(pstr,
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days,
+ dbInfo->keeplist,
+ dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
+ dbInfo->fsync,
+ dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
+ }
- pstr += sprintf(pstr, ";");
- fprintf(fp, "%s\n\n", sqlstr);
+ pstr += sprintf(pstr, ";");
+ fprintf(fp, "%s\n\n", sqlstr);
}
-void* taosDumpOutWorkThreadFp(void *arg)
+static void* taosDumpOutWorkThreadFp(void *arg)
{
- SThreadParaObj *pThread = (SThreadParaObj*)arg;
- STableRecord tableRecord;
- int fd;
-
- char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
- sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
- fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf);
- return NULL;
- }
-
- FILE *fp = NULL;
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+ SThreadParaObj *pThread = (SThreadParaObj*)arg;
+ STableRecord tableRecord;
+ int fd;
+
+ char tmpBuf[4096] = {0};
+ sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
+ fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
+ }
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
- }
+ FILE *fp = NULL;
+ memset(tmpBuf, 0, 4096);
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- close(fd);
- return NULL;
- }
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.tables.%d.sql",
+ g_args.outpath, pThread->dbName, pThread->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.tables.%d.sql",
+ pThread->dbName, pThread->threadIndex);
+ }
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, "use %s", pThread->dbName);
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ close(fd);
+ return NULL;
+ }
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
- int32_t code = taos_errno(tmpResult);
- if (code != 0) {
- fprintf(stderr, "invalid database %s\n", pThread->dbName);
- taos_free_result(tmpResult);
- fclose(fp);
- close(fd);
- return NULL;
- }
+ memset(tmpBuf, 0, 4096);
+ sprintf(tmpBuf, "use %s", pThread->dbName);
- int fileNameIndex = 1;
- int tablesInOneFile = 0;
- int64_t lastRowsPrint = 5000000;
- fprintf(fp, "USE %s;\n\n", pThread->dbName);
- while (1) {
- ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
- if (readLen <= 0) break;
-
- int ret = taosDumpTable(
- tableRecord.name, tableRecord.metric, &g_args,
- fp, pThread->taosCon, pThread->dbName);
- if (ret >= 0) {
- // TODO: sum table count and table rows by self
- pThread->tablesOfDumpOut++;
- pThread->rowsOfDumpOut += ret;
-
- if (pThread->rowsOfDumpOut >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from database %s\n",
- pThread->rowsOfDumpOut, pThread->dbName);
- lastRowsPrint += 5000000;
- }
-
- tablesInOneFile++;
- if (tablesInOneFile >= g_args.table_batch) {
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+ int32_t code = taos_errno(tmpResult);
+ if (code != 0) {
+ errorPrint("%s() LN%d, invalid database %s. reason: %s\n",
+ __func__, __LINE__, pThread->dbName, taos_errstr(tmpResult));
+ taos_free_result(tmpResult);
fclose(fp);
- tablesInOneFile = 0;
+ close(fd);
+ return NULL;
+ }
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
- g_args.outpath, pThread->dbName,
- pThread->threadIndex, fileNameIndex);
- } else {
- sprintf(tmpBuf, "%s.tables.%d-%d.sql",
- pThread->dbName, pThread->threadIndex, fileNameIndex);
- }
- fileNameIndex++;
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- fprintf(stderr, "failed to open file %s\n", tmpBuf);
- close(fd);
- taos_free_result(tmpResult);
- return NULL;
+#if 0
+ int fileNameIndex = 1;
+ int tablesInOneFile = 0;
+#endif
+ int64_t lastRowsPrint = 5000000;
+ fprintf(fp, "USE %s;\n\n", pThread->dbName);
+ while (1) {
+ ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
+ if (readLen <= 0) break;
+
+ int ret = taosDumpTable(
+ tableRecord.name, tableRecord.metric,
+ fp, pThread->taosCon, pThread->dbName);
+ if (ret >= 0) {
+ // TODO: sum the table count and table rows ourselves
+ pThread->tablesOfDumpOut++;
+ pThread->rowsOfDumpOut += ret;
+
+ if (pThread->rowsOfDumpOut >= lastRowsPrint) {
+ printf(" %"PRId64 " rows already be dumpout from database %s\n",
+ pThread->rowsOfDumpOut, pThread->dbName);
+ lastRowsPrint += 5000000;
+ }
+
+#if 0
+ tablesInOneFile++;
+ if (tablesInOneFile >= g_args.table_batch) {
+ fclose(fp);
+ tablesInOneFile = 0;
+
+ memset(tmpBuf, 0, 4096);
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql",
+ g_args.outpath, pThread->dbName,
+ pThread->threadIndex, fileNameIndex);
+ } else {
+ sprintf(tmpBuf, "%s.tables.%d-%d.sql",
+ pThread->dbName, pThread->threadIndex, fileNameIndex);
+ }
+ fileNameIndex++;
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ close(fd);
+ taos_free_result(tmpResult);
+ return NULL;
+ }
+ }
+#endif
}
- }
}
- }
- taos_free_result(tmpResult);
- close(fd);
- fclose(fp);
+ taos_free_result(tmpResult);
+ close(fd);
+ fclose(fp);
- return NULL;
+ return NULL;
}
-static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
+static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
{
- pthread_attr_t thattr;
- SThreadParaObj *threadObj =
- (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
- for (int t = 0; t < numOfThread; ++t) {
- SThreadParaObj *pThread = threadObj + t;
- pThread->rowsOfDumpOut = 0;
- pThread->tablesOfDumpOut = 0;
- pThread->threadIndex = t;
- pThread->totalThreads = numOfThread;
- tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
- pThread->taosCon = taosCon;
-
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
-
- if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) {
- fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex);
- exit(0);
+ pthread_attr_t thattr;
+ SThreadParaObj *threadObj =
+ (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
+
+ if (threadObj == NULL) {
+ errorPrint("%s() LN%d, memory allocation failed!\n",
+ __func__, __LINE__);
+ return;
}
- }
- for (int32_t t = 0; t < numOfThread; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
+ for (int t = 0; t < numOfThread; ++t) {
+ SThreadParaObj *pThread = threadObj + t;
+ pThread->rowsOfDumpOut = 0;
+ pThread->tablesOfDumpOut = 0;
+ pThread->threadIndex = t;
+ pThread->totalThreads = numOfThread;
+ tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
+ pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taosCon == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ return;
+ }
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&(pThread->threadID), &thattr,
+ taosDumpOutWorkThreadFp,
+ (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread:%d failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(-1);
+ }
+ }
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
- int64_t totalRowsOfDumpOut = 0;
- int64_t totalChildTblsOfDumpOut = 0;
- for (int32_t t = 0; t < numOfThread; ++t) {
- totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut;
- totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut;
- }
+ for (int32_t t = 0; t < numOfThread; ++t) {
+ pthread_join(threadObj[t].threadID, NULL);
+ }
+
+ // TODO: sum every thread's dumped table count and row count, then save them into result_output.txt
+ int64_t totalRowsOfDumpOut = 0;
+ int64_t totalChildTblsOfDumpOut = 0;
+ for (int32_t t = 0; t < numOfThread; ++t) {
+ totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut;
+ totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut;
+ }
- fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", totalChildTblsOfDumpOut);
- fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", totalRowsOfDumpOut);
- g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut;
- g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut;
- free(threadObj);
+ fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n",
+ totalChildTblsOfDumpOut);
+ fprintf(g_fpOfResult, "# row counter: %"PRId64"\n",
+ totalRowsOfDumpOut);
+ g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut;
+ g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut;
+ free(threadObj);
}
+static int32_t taosDumpStable(char *table, FILE *fp,
+ TAOS* taosCon, char* dbName) {
+ uint64_t sizeOfTableDes = (uint64_t)(sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
+ STableDef *tableDes = (STableDef *)calloc(1, sizeOfTableDes);
+ if (NULL == tableDes) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, sizeOfTableDes);
+ exit(-1);
+ }
-int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
- int count = 0;
+ int count = taosGetTableDes(dbName, table, tableDes, taosCon, true);
- STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS);
- if (NULL == tableDes) {
- fprintf(stderr, "failed to allocate memory\n");
- exit(-1);
- }
+ if (count < 0) {
+ free(tableDes);
+ errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
+ __func__, __LINE__, table);
+ exit(-1);
+ }
- count = taosGetTableDes(dbName, table, tableDes, taosCon, true);
+ taosDumpCreateTableClause(tableDes, count, fp, dbName);
- if (count < 0) {
free(tableDes);
- fprintf(stderr, "failed to get stable[%s] schema\n", table);
- exit(-1);
- }
-
- taosDumpCreateTableClause(tableDes, count, fp, dbName);
-
- free(tableDes);
- return 0;
+ return 0;
}
-
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- sprintf(sqlstr, "show %s.stables", dbName);
+ sprintf(sqlstr, "show %s.stables", dbName);
- TAOS_RES* res = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- exit(-1);
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ exit(-1);
+ }
- char tmpFileName[TSDB_FILENAME_LEN + 1];
- memset(tmpFileName, 0, TSDB_FILENAME_LEN);
- sprintf(tmpFileName, ".stables.tmp");
- fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpFileName);
- taos_free_result(res);
- (void)remove(".stables.tmp");
- exit(-1);
- }
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ char tmpFileName[MAX_FILE_NAME_LEN];
+ memset(tmpFileName, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpFileName, ".stables.tmp");
+ fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpFileName);
+ taos_free_result(res);
+ (void)remove(".stables.tmp");
+ exit(-1);
+ }
- while ((row = taos_fetch_row(res)) != NULL) {
- memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
+ while ((row = taos_fetch_row(res)) != NULL) {
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ }
- taos_free_result(res);
- (void)lseek(fd, 0, SEEK_SET);
+ taos_free_result(res);
+ (void)lseek(fd, 0, SEEK_SET);
- int superTblCnt = 0;
- while (1) {
- ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
- if (readLen <= 0) break;
+ int superTblCnt = 0;
+ while (1) {
+ ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
+ if (readLen <= 0) break;
- int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
- if (0 == ret) {
- superTblCnt++;
+ int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
+ if (0 == ret) {
+ superTblCnt++;
+ }
}
- }
- // TODO: save dump super table into result_output.txt
- fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+ // TODO: save the dumped super table count into result_output.txt
+ fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- close(fd);
- (void)remove(".stables.tmp");
+ close(fd);
+ (void)remove(".stables.tmp");
- return 0;
+ return 0;
}
-int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) {
- TAOS_ROW row;
- int fd = -1;
- STableRecord tableRecord;
+static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
+ TAOS_ROW row;
+ int fd = -1;
+ STableRecord tableRecord;
- taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
+ taosDumpCreateDbClause(dbInfo, g_args.with_property, fp);
- fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ dbInfo->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- fprintf(fp, "USE %s;\n\n", dbInfo->name);
+ fprintf(fp, "USE %s;\n\n", dbInfo->name);
- (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
+ (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
- sprintf(sqlstr, "show %s.tables", dbInfo->name);
+ sprintf(sqlstr, "show %s.tables", dbInfo->name);
- TAOS_RES* res = taos_query(taosCon, sqlstr);
- int code = taos_errno(res);
- if (code != 0) {
- fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
-
- char tmpBuf[TSDB_FILENAME_LEN + 1];
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".show-tables.tmp");
- fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (fd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- taos_free_result(res);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- memset(&tableRecord, 0, sizeof(STableRecord));
- tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
+ char tmpBuf[MAX_FILE_NAME_LEN];
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".show-tables.tmp");
+ fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (fd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ taos_free_result(res);
+ return -1;
+ }
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- numOfTable++;
- }
- taos_free_result(res);
- lseek(fd, 0, SEEK_SET);
-
- int maxThreads = g_args.thread_num;
- int tableOfPerFile ;
- if (numOfTable <= g_args.thread_num) {
- tableOfPerFile = 1;
- maxThreads = numOfTable;
- } else {
- tableOfPerFile = numOfTable / g_args.thread_num;
- if (0 != numOfTable % g_args.thread_num) {
- tableOfPerFile += 1;
- }
- }
+ int32_t numOfTable = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ memset(&tableRecord, 0, sizeof(STableRecord));
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
+ min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
- char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
- if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
- close(fd);
- return -1;
- }
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
- int32_t numOfThread = 0;
- int subFd = -1;
- for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
- memset(tmpBuf, 0, TSDB_FILENAME_LEN);
- sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
- subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
- if (subFd == -1) {
- fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
- for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
- (void)remove(tmpBuf);
- }
- sprintf(tmpBuf, ".show-tables.tmp");
- (void)remove(tmpBuf);
- free(tblBuf);
- close(fd);
- return -1;
+ numOfTable++;
}
+ taos_free_result(res);
+ lseek(fd, 0, SEEK_SET);
- // read tableOfPerFile for fd, write to subFd
- ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
- if (readLen <= 0) {
- close(subFd);
- break;
+ int maxThreads = g_args.thread_num;
+ int tableOfPerFile ;
+ if (numOfTable <= g_args.thread_num) {
+ tableOfPerFile = 1;
+ maxThreads = numOfTable;
+ } else {
+ tableOfPerFile = numOfTable / g_args.thread_num;
+ if (0 != numOfTable % g_args.thread_num) {
+ tableOfPerFile += 1;
+ }
}
- taosWrite(subFd, tblBuf, readLen);
- close(subFd);
- }
- sprintf(tmpBuf, ".show-tables.tmp");
- (void)remove(tmpBuf);
+ char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
+ if (NULL == tblBuf){
+ errorPrint("failed to calloc %" PRIzu "\n",
+ tableOfPerFile * sizeof(STableRecord));
+ close(fd);
+ return -1;
+ }
- if (fd >= 0) {
- close(fd);
- fd = -1;
- }
+ int32_t numOfThread = 0;
+ int subFd = -1;
+ for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
+ memset(tmpBuf, 0, MAX_FILE_NAME_LEN);
+ sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
+ subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
+ if (subFd == -1) {
+ errorPrint("%s() LN%d, failed to open temp file: %s\n",
+ __func__, __LINE__, tmpBuf);
+ for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+ sprintf(tmpBuf, ".show-tables.tmp");
+ (void)remove(tmpBuf);
+ free(tblBuf);
+ close(fd);
+ return -1;
+ }
- taos_free_result(res);
+ // read tableOfPerFile records from fd and write them to subFd
+ ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
+ if (readLen <= 0) {
+ close(subFd);
+ break;
+ }
+ taosWrite(subFd, tblBuf, readLen);
+ close(subFd);
+ }
- // start multi threads to dumpout
- taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name);
- for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
- sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ sprintf(tmpBuf, ".show-tables.tmp");
(void)remove(tmpBuf);
- }
- free(tblBuf);
- return 0;
+ if (fd >= 0) {
+ close(fd);
+ fd = -1;
+ }
+
+ // start multi threads to dumpout
+ taosStartDumpOutWorkThreads(numOfThread, dbInfo->name);
+ for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
+ sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
+ (void)remove(tmpBuf);
+ }
+
+ free(tblBuf);
+ return 0;
}
-void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) {
+static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName) {
int counter = 0;
int count_temp = 0;
char sqlstr[COMMAND_SIZE];
@@ -1704,257 +1896,291 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha
fprintf(fp, "%s\n\n", sqlstr);
}
-void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName) {
- int counter = 0;
- int count_temp = 0;
+static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
+ int numOfCols, FILE *fp, char* dbName) {
+ int counter = 0;
+ int count_temp = 0;
- char* tmpBuf = (char *)malloc(COMMAND_SIZE);
- if (tmpBuf == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return;
- }
+ char* tmpBuf = (char *)malloc(COMMAND_SIZE);
+ if (tmpBuf == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %d memory\n",
+ __func__, __LINE__, COMMAND_SIZE);
+ return;
+ }
- char *pstr = NULL;
- pstr = tmpBuf;
+ char *pstr = NULL;
+ pstr = tmpBuf;
- pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
- dbName, tableDes->name, dbName, metric);
+ pstr += sprintf(tmpBuf,
+ "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
+ dbName, tableDes->name, dbName, metric);
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
- }
+ for (; counter < numOfCols; counter++) {
+ if (tableDes->cols[counter].note[0] != '\0') break;
+ }
- assert(counter < numOfCols);
- count_temp = counter;
+ assert(counter < numOfCols);
+ count_temp = counter;
+
+ for (; counter < numOfCols; counter++) {
+ if (counter != count_temp) {
+ if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
+ strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
+ pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
+ } else {
+ pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
+ }
+ } else {
+ if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
+ strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
+ pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
+ } else {
+ pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
+ }
+ /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */
+ }
- for (; counter < numOfCols; counter++) {
- if (counter != count_temp) {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
- pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
- } else {
- pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note);
- }
- } else {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
- pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
- } else {
- pstr += sprintf(pstr, "%s", tableDes->cols[counter].note);
- }
- /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */
+ /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar")
+ * == 0) { */
+ /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */
+ /* } */
}
- /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar")
- * == 0) { */
- /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */
- /* } */
- }
-
- pstr += sprintf(pstr, ");");
+ pstr += sprintf(pstr, ");");
- fprintf(fp, "%s\n", tmpBuf);
- free(tmpBuf);
+ fprintf(fp, "%s\n", tmpBuf);
+ free(tmpBuf);
}
-int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) {
- int64_t lastRowsPrint = 5000000;
- int64_t totalRows = 0;
- int count = 0;
- char *pstr = NULL;
- TAOS_ROW row = NULL;
- int numFields = 0;
+static int writeSchemaToAvro(char *jsonAvroSchema)
+{
+ errorPrint("%s() LN%d, TODO: implement write schema to avro",
+ __func__, __LINE__);
+ return 0;
+}
- if (arguments->schemaonly) {
+static int64_t writeResultToAvro(TAOS_RES *res)
+{
+ errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__);
return 0;
- }
+}
- int32_t sql_buf_len = arguments->max_sql_len;
- char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
- if (tmpBuffer == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+ int64_t totalRows = 0;
+
+ int32_t sql_buf_len = g_args.max_sql_len;
+ char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
+ if (tmpBuffer == NULL) {
+ errorPrint("failed to allocate %d memory\n", sql_buf_len + 128);
+ return -1;
+ }
- pstr = tmpBuffer;
+ char *pstr = tmpBuffer;
- char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbname, arguments->start_time, arguments->end_time);
+ TAOS_ROW row = NULL;
+ int numFields = 0;
+ int rowFlag = 0;
+ int64_t lastRowsPrint = 5000000;
+ int count = 0;
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
- int32_t code = taos_errno(tmpResult);
- if (code != 0) {
- fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
- free(tmpBuffer);
- taos_free_result(tmpResult);
- return -1;
- }
+ numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- numFields = taos_field_count(tmpResult);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(tmpResult);
+ int32_t curr_sqlstr_len = 0;
+ int32_t total_sqlstr_len = 0;
- int rowFlag = 0;
- int32_t curr_sqlstr_len = 0;
- int32_t total_sqlstr_len = 0;
- count = 0;
- while ((row = taos_fetch_row(tmpResult)) != NULL) {
- pstr = tmpBuffer;
- curr_sqlstr_len = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ curr_sqlstr_len = 0;
- int32_t* length = taos_fetch_lengths(tmpResult); // act len
+ int32_t* length = taos_fetch_lengths(res); // act len
- if (count == 0) {
- total_sqlstr_len = 0;
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s.%s VALUES (", dbName, tbname);
- } else {
- if (arguments->mysqlFlag) {
- if (0 == rowFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- rowFlag++;
+ if (count == 0) {
+ total_sqlstr_len = 0;
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "INSERT INTO %s.%s VALUES (", dbName, tbName);
} else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ if (g_args.mysqlFlag) {
+ if (0 == rowFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ rowFlag++;
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ }
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ }
}
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- }
- }
- for (int col = 0; col < numFields; col++) {
- if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+ for (int col = 0; col < numFields; col++) {
+ if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
- if (row[col] == NULL) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
- continue;
- }
+ if (row[col] == NULL) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
+ continue;
+ }
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_INT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_BINARY: {
- char tbuf[COMMAND_SIZE] = {0};
- //*(pstr++) = '\'';
- converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- //pstr = stpcpy(pstr, tbuf);
- //*(pstr++) = '\'';
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
+ *((int64_t *)row[col]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ //*(pstr++) = '\'';
+ converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
+ //pstr = stpcpy(pstr, tbuf);
+ //*(pstr++) = '\'';
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!g_args.mysqlFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
+ *(int64_t *)row[col]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[col]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
+ buf, (int)(ts % 1000));
+ }
+ break;
+ default:
+ break;
+ }
}
- case TSDB_DATA_TYPE_NCHAR: {
- char tbuf[COMMAND_SIZE] = {0};
- convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
+
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+
+ totalRows++;
+ count++;
+ fprintf(fp, "%s", tmpBuffer);
+
+ if (totalRows >= lastRowsPrint) {
+ printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
+ totalRows, dbName, tbName);
+ lastRowsPrint += 5000000;
+ }
+
+ total_sqlstr_len += curr_sqlstr_len;
+
+ if ((count >= g_args.data_batch)
+ || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
+ fprintf(fp, ";\n");
+ count = 0;
}
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!arguments->mysqlFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[col]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000));
- }
- break;
- default:
- break;
- }
}
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
+ debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
- totalRows++;
- count++;
- fprintf(fp, "%s", tmpBuffer);
+ fprintf(fp, "\n");
+ atomic_add_fetch_64(&g_totalDumpOutRows, totalRows);
+ free(tmpBuffer);
- if (totalRows >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
- lastRowsPrint += 5000000;
- }
+ return 0;
+}
- total_sqlstr_len += curr_sqlstr_len;
+static int taosDumpTableData(FILE *fp, char *tbName,
+ TAOS* taosCon, char* dbName,
+ char *jsonAvroSchema) {
+ int64_t totalRows = 0;
- if ((count >= arguments->data_batch)
- || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
- fprintf(fp, ";\n");
- count = 0;
- } //else {
- //fprintf(fp, "\\\n");
- //}
- }
+ char sqlstr[1024] = {0};
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ dbName, tbName, g_args.start_time, g_args.end_time);
- printf("total_sqlstr_len: %d\n", total_sqlstr_len);
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("failed to run command %s, reason: %s\n",
+ sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- fprintf(fp, "\n");
- atomic_add_fetch_64(&totalDumpOutRows, totalRows);
+ if (g_args.avro) {
+ writeSchemaToAvro(jsonAvroSchema);
+ totalRows = writeResultToAvro(res);
+ } else {
+ totalRows = writeResultToSql(res, fp, dbName, tbName);
+ }
- taos_free_result(tmpResult);
- free(tmpBuffer);
- return totalRows;
+ taos_free_result(res);
+ return totalRows;
}
-int taosCheckParam(struct arguments *arguments) {
- if (arguments->all_databases && arguments->databases) {
- fprintf(stderr, "conflict option --all-databases and --databases\n");
- return -1;
- }
+static int taosCheckParam(struct arguments *arguments) {
+ if (g_args.all_databases && g_args.databases) {
+ fprintf(stderr, "conflict option --all-databases and --databases\n");
+ return -1;
+ }
- if (arguments->start_time > arguments->end_time) {
- fprintf(stderr, "start time is larger than end time\n");
- return -1;
- }
+ if (g_args.start_time > g_args.end_time) {
+ fprintf(stderr, "start time is larger than end time\n");
+ return -1;
+ }
- if (arguments->arg_list_len == 0) {
- if ((!arguments->all_databases) && (!arguments->isDumpIn)) {
- fprintf(stderr, "taosdump requires parameters\n");
- return -1;
+ if (g_args.arg_list_len == 0) {
+ if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
+ fprintf(stderr, "taosdump requires parameters\n");
+ return -1;
+ }
+ }
+ /*
+ if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
+ fprintf(stderr, "duplicate parameter input and output file path\n");
+ return -1;
+ }
+ */
+ if (!g_args.isDumpIn && g_args.encode != NULL) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
}
- }
-/*
- if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) {
- fprintf(stderr, "duplicate parameter input and output file path\n");
- return -1;
- }
-*/
- if (!arguments->isDumpIn && arguments->encode != NULL) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
- if (arguments->table_batch <= 0) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
+ if (g_args.table_batch <= 0) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
+ }
- return 0;
+ return 0;
}
-bool isEmptyCommand(char *cmd) {
+/*
+static bool isEmptyCommand(char *cmd) {
char *pchar = cmd;
while (*pchar != '\0') {
@@ -1965,8 +2191,8 @@ bool isEmptyCommand(char *cmd) {
return true;
}
-void taosReplaceCtrlChar(char *str) {
- _Bool ctrlOn = false;
+static void taosReplaceCtrlChar(char *str) {
+ bool ctrlOn = false;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) {
@@ -2008,6 +2234,7 @@ void taosReplaceCtrlChar(char *str) {
*pstr = '\0';
}
+*/
char *ascii_literal_list[] = {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
@@ -2031,374 +2258,420 @@ char *ascii_literal_list[] = {
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
-int converStringToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- while (size > 0) {
- if (*pstr == '\0') break;
- pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
- pstr++;
- size--;
- }
- *pbuf = '\0';
- return 0;
+static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
+ pstr++;
+ size--;
+ }
+ *pbuf = '\0';
+ return 0;
}
-int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- // TODO
- wchar_t wc;
- while (size > 0) {
- if (*pstr == '\0') break;
- int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
- if (byte_width < 0) {
- fprintf(stderr, "mbtowc() return fail.\n");
- exit(-1);
- }
-
- if ((int)wc < 256) {
- pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
- } else {
- memcpy(pbuf, pstr, byte_width);
- pbuf += byte_width;
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ // TODO
+ wchar_t wc;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
+ if (byte_width < 0) {
+ errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
+ exit(-1);
+ }
+
+ if ((int)wc < 256) {
+ pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ } else {
+ memcpy(pbuf, pstr, byte_width);
+ pbuf += byte_width;
+ }
+ pstr += byte_width;
}
- pstr += byte_width;
- }
- *pbuf = '\0';
+ *pbuf = '\0';
- return 0;
+ return 0;
}
-void taosDumpCharset(FILE *fp) {
- char charsetline[256];
+static void taosDumpCharset(FILE *fp) {
+ char charsetline[256];
- (void)fseek(fp, 0, SEEK_SET);
- sprintf(charsetline, "#!%s\n", tsCharset);
- (void)fwrite(charsetline, strlen(charsetline), 1, fp);
+ (void)fseek(fp, 0, SEEK_SET);
+ sprintf(charsetline, "#!%s\n", tsCharset);
+ (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}
-void taosLoadFileCharset(FILE *fp, char *fcharset) {
- char * line = NULL;
- size_t line_size = 0;
+static void taosLoadFileCharset(FILE *fp, char *fcharset) {
+ char * line = NULL;
+ size_t line_size = 0;
- (void)fseek(fp, 0, SEEK_SET);
- ssize_t size = getline(&line, &line_size, fp);
- if (size <= 2) {
- goto _exit_no_charset;
- }
+ (void)fseek(fp, 0, SEEK_SET);
+ ssize_t size = getline(&line, &line_size, fp);
+ if (size <= 2) {
+ goto _exit_no_charset;
+ }
- if (strncmp(line, "#!", 2) != 0) {
- goto _exit_no_charset;
- }
- if (line[size - 1] == '\n') {
- line[size - 1] = '\0';
- size--;
- }
- strcpy(fcharset, line + 2);
+ if (strncmp(line, "#!", 2) != 0) {
+ goto _exit_no_charset;
+ }
+ if (line[size - 1] == '\n') {
+ line[size - 1] = '\0';
+ size--;
+ }
+ strcpy(fcharset, line + 2);
- tfree(line);
- return;
+ tfree(line);
+ return;
_exit_no_charset:
- (void)fseek(fp, 0, SEEK_SET);
- *fcharset = '\0';
- tfree(line);
- return;
+ (void)fseek(fp, 0, SEEK_SET);
+ *fcharset = '\0';
+ tfree(line);
+ return;
}
// ======== dumpIn support multi threads functions ================================//
-static char **tsDumpInSqlFiles = NULL;
-static int32_t tsSqlFileNum = 0;
-static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0};
-static char tsfCharset[64] = {0};
-static int taosGetFilesNum(const char *directoryName, const char *prefix)
+static char **g_tsDumpInSqlFiles = NULL;
+static int32_t g_tsSqlFileNum = 0;
+static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
+static char g_tsCharset[64] = {0};
+
+static int taosGetFilesNum(const char *directoryName,
+ const char *prefix, const char *prefix2)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
+ char cmd[1024] = { 0 };
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ if (prefix2)
+ sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ",
+ directoryName, prefix, directoryName, prefix2);
+ else
+ sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
- int fileNum = 0;
- if (fscanf(fp, "%d", &fileNum) != 1) {
- fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd);
- exit(0);
- }
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
+ }
- if (fileNum <= 0) {
- fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName);
- exit(0);
- }
+ int fileNum = 0;
+ if (fscanf(fp, "%d", &fileNum) != 1) {
+ errorPrint("failed to execute:%s, parse result error\n", cmd);
+ exit(-1);
+ }
+
+ if (fileNum <= 0) {
+ errorPrint("directory:%s is empry\n", directoryName);
+ exit(-1);
+ }
- pclose(fp);
- return fileNum;
+ pclose(fp);
+ return fileNum;
}
-static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles)
+static void taosParseDirectory(const char *directoryName,
+ const char *prefix, const char *prefix2,
+ char **fileArray, int totalFiles)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ char cmd[1024] = { 0 };
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ if (prefix2) {
+ sprintf(cmd, "ls %s/*.%s %s/*.%s | sort",
+ directoryName, prefix, directoryName, prefix2);
+ } else {
+ sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ }
- int fileNum = 0;
- while (fscanf(fp, "%128s", fileArray[fileNum++])) {
- if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) {
- fileNum--;
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
}
- if (fileNum >= totalFiles) {
- break;
+
+ int fileNum = 0;
+ while (fscanf(fp, "%128s", fileArray[fileNum++])) {
+ if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
+ fileNum--;
+ }
+ if (fileNum >= totalFiles) {
+ break;
+ }
}
- }
- if (fileNum != totalFiles) {
- fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName);
- pclose(fp);
- exit(0);
- }
+ if (fileNum != totalFiles) {
+ errorPrint("directory:%s changed while read\n", directoryName);
+ pclose(fp);
+ exit(-1);
+ }
- pclose(fp);
+ pclose(fp);
}
-static void taosCheckTablesSQLFile(const char *directoryName)
+static void taosCheckDatabasesSQLFile(const char *directoryName)
{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/dbs.sql", directoryName);
+ char cmd[1024] = { 0 };
+ sprintf(cmd, "ls %s/dbs.sql", directoryName);
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(0);
- }
+ FILE *fp = popen(cmd, "r");
+ if (fp == NULL) {
+ errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
+ exit(-1);
+ }
- while (fscanf(fp, "%128s", tsDbSqlFile)) {
- break;
- }
+ while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
+ break;
+ }
- pclose(fp);
+ pclose(fp);
}
-static void taosMallocSQLFiles()
+static void taosMallocDumpFiles()
{
- tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*));
- for (int i = 0; i < tsSqlFileNum; i++) {
- tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN);
- }
+ g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
+ for (int i = 0; i < g_tsSqlFileNum; i++) {
+ g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ }
}
-static void taosFreeSQLFiles()
+static void taosFreeDumpFiles()
{
- for (int i = 0; i < tsSqlFileNum; i++) {
- tfree(tsDumpInSqlFiles[i]);
- }
- tfree(tsDumpInSqlFiles);
+ for (int i = 0; i < g_tsSqlFileNum; i++) {
+ tfree(g_tsDumpInSqlFiles[i]);
+ }
+ tfree(g_tsDumpInSqlFiles);
}
static void taosGetDirectoryFileList(char *inputDir)
{
- struct stat fileStat;
- if (stat(inputDir, &fileStat) < 0) {
- fprintf(stderr, "ERROR: %s not exist\n", inputDir);
- exit(0);
- }
-
- if (fileStat.st_mode & S_IFDIR) {
- taosCheckTablesSQLFile(inputDir);
- tsSqlFileNum = taosGetFilesNum(inputDir, "sql");
- int tsSqlFileNumOfTbls = tsSqlFileNum;
- if (tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ struct stat fileStat;
+ if (stat(inputDir, &fileStat) < 0) {
+ errorPrint("%s not exist\n", inputDir);
+ exit(-1);
}
- taosMallocSQLFiles();
- if (0 != tsSqlFileNumOfTbls) {
- taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+
+ if (fileStat.st_mode & S_IFDIR) {
+ taosCheckDatabasesSQLFile(inputDir);
+ if (g_args.avro)
+ g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
+ else
+ g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+
+ int tsSqlFileNumOfTbls = g_tsSqlFileNum;
+ if (g_tsDbSqlFile[0] != 0) {
+ tsSqlFileNumOfTbls--;
+ }
+ taosMallocDumpFiles();
+ if (0 != tsSqlFileNumOfTbls) {
+ if (g_args.avro) {
+ taosParseDirectory(inputDir, "sql", "avro",
+ g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+ } else {
+ taosParseDirectory(inputDir, "sql", NULL,
+ g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
+ }
+ }
+ fprintf(stdout, "\nstart to dispose %d files in %s\n",
+ g_tsSqlFileNum, inputDir);
+ } else {
+ errorPrint("%s is not a directory\n", inputDir);
+ exit(-1);
}
- fprintf(stdout, "\nstart to dispose %d files in %s\n", tsSqlFileNum, inputDir);
- }
- else {
- fprintf(stderr, "ERROR: %s is not a directory\n", inputDir);
- exit(0);
- }
}
-static FILE* taosOpenDumpInFile(char *fptr) {
- wordexp_t full_path;
-
- if (wordexp(fptr, &full_path, 0) != 0) {
- fprintf(stderr, "ERROR: illegal file name: %s\n", fptr);
- return NULL;
- }
+static FILE* taosOpenDumpInFile(char *fptr) {
+ wordexp_t full_path;
- char *fname = full_path.we_wordv[0];
+ if (wordexp(fptr, &full_path, 0) != 0) {
+ errorPrint("illegal file name: %s\n", fptr);
+ return NULL;
+ }
- FILE *f = fopen(fname, "r");
- if (f == NULL) {
- fprintf(stderr, "ERROR: failed to open file %s\n", fname);
- wordfree(&full_path);
- return NULL;
- }
+ char *fname = full_path.we_wordv[0];
- wordfree(&full_path);
+ FILE *f = NULL;
+ if ((fname) && (strlen(fname) > 0)) {
+ f = fopen(fname, "r");
+ if (f == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, fname);
+ }
+ }
- return f;
+ wordfree(&full_path);
+ return f;
}
-int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) {
- int read_len = 0;
- char * cmd = NULL;
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
-
- cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
- if (cmd == NULL) {
- fprintf(stderr, "failed to allocate memory\n");
- return -1;
- }
+static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
+ char* encode, char* fileName) {
+ int read_len = 0;
+ char * cmd = NULL;
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
+ if (cmd == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- int lastRowsPrint = 5000000;
- int lineNo = 0;
- while ((read_len = getline(&line, &line_len, fp)) != -1) {
- ++lineNo;
- if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
- line[--read_len] = '\0';
+ int lastRowsPrint = 5000000;
+ int lineNo = 0;
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ ++lineNo;
+ if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
+ line[--read_len] = '\0';
- //if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
- continue;
- }
+ //if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ if (read_len == 0 ) {
+ continue;
+ }
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }
- memcpy(cmd + cmd_len, line, read_len);
- cmd[read_len + cmd_len]= '\0';
- if (queryDbImpl(taos, cmd)) {
- fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
- }
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd[read_len + cmd_len]= '\0';
+ if (queryDbImpl(taos, cmd)) {
+ errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n",
+ __func__, __LINE__, lineNo, fileName);
+ fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
+ }
- memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
+ memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
+ cmd_len = 0;
- if (lineNo >= lastRowsPrint) {
- printf(" %d lines already be executed from file %s\n", lineNo, fileName);
- lastRowsPrint += 5000000;
+ if (lineNo >= lastRowsPrint) {
+ printf(" %d lines already be executed from file %s\n", lineNo, fileName);
+ lastRowsPrint += 5000000;
+ }
}
- }
- tfree(cmd);
- tfree(line);
- fclose(fp);
- return 0;
+ tfree(cmd);
+ tfree(line);
+ fclose(fp);
+ return 0;
}
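The loop above joins backslash-continued lines into one statement before handing it to queryDbImpl(). A minimal standalone sketch of that accumulation, with invented input lines and printf standing in for the query call (not part of the patch):

#include <stdio.h>
#include <string.h>

/* Sketch of the continuation handling in taosDumpInOneFile(): a line that
 * ends with a backslash is appended to the pending statement and execution
 * is deferred until a line without the trailing backslash arrives.
 * The small fixed buffer and missing bounds checks are for brevity only. */
int main(void) {
    const char *lines[] = { "create table demo \\", "(ts timestamp, v int);" };
    char cmd[256] = {0};
    size_t cmd_len = 0;

    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); ++i) {
        size_t n = strlen(lines[i]);
        memcpy(cmd + cmd_len, lines[i], n);
        cmd_len += n;
        if (n > 0 && cmd[cmd_len - 1] == '\\') {
            cmd[cmd_len - 1] = ' ';    /* keep accumulating */
            continue;
        }
        cmd[cmd_len] = '\0';
        printf("execute: %s\n", cmd);  /* stand-in for queryDbImpl() */
        cmd_len = 0;
    }
    return 0;
}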
-void* taosDumpInWorkThreadFp(void *arg)
+static void* taosDumpInWorkThreadFp(void *arg)
{
- SThreadParaObj *pThread = (SThreadParaObj*)arg;
- for (int32_t f = 0; f < tsSqlFileNum; ++f) {
- if (f % pThread->totalThreads == pThread->threadIndex) {
- char *SQLFileName = tsDumpInSqlFiles[f];
- FILE* fp = taosOpenDumpInFile(SQLFileName);
- if (NULL == fp) {
- continue;
- }
- fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
- taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
+ SThreadParaObj *pThread = (SThreadParaObj*)arg;
+ for (int32_t f = 0; f < g_tsSqlFileNum; ++f) {
+ if (f % pThread->totalThreads == pThread->threadIndex) {
+ char *SQLFileName = g_tsDumpInSqlFiles[f];
+ FILE* fp = taosOpenDumpInFile(SQLFileName);
+ if (NULL == fp) {
+ continue;
+ }
+ fprintf(stderr, ", Success Open input file: %s\n",
+ SQLFileName);
+ taosDumpInOneFile(pThread->taosCon, fp, g_tsCharset, g_args.encode, SQLFileName);
+ }
}
- }
- return NULL;
+ return NULL;
}
-static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
+static void taosStartDumpInWorkThreads()
{
- pthread_attr_t thattr;
- SThreadParaObj *pThread;
- int32_t totalThreads = args->thread_num;
+ pthread_attr_t thattr;
+ SThreadParaObj *pThread;
+ int32_t totalThreads = g_args.thread_num;
- if (totalThreads > tsSqlFileNum) {
- totalThreads = tsSqlFileNum;
- }
+ if (totalThreads > g_tsSqlFileNum) {
+ totalThreads = g_tsSqlFileNum;
+ }
- SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
- for (int32_t t = 0; t < totalThreads; ++t) {
- pThread = threadObj + t;
- pThread->threadIndex = t;
- pThread->totalThreads = totalThreads;
- pThread->taosCon = taosCon;
+ SThreadParaObj *threadObj = (SThreadParaObj *)calloc(
+ totalThreads, sizeof(SThreadParaObj));
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ if (NULL == threadObj) {
+ errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
+ exit(-1);
+ }
- if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) {
- fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex);
- exit(0);
+ for (int32_t t = 0; t < totalThreads; ++t) {
+ pThread = threadObj + t;
+ pThread->threadIndex = t;
+ pThread->totalThreads = totalThreads;
+ pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taosCon == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ return;
+ }
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&(pThread->threadID), &thattr,
+ taosDumpInWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread:%d failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(-1);
+ }
}
- }
- for (int t = 0; t < totalThreads; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
+ for (int t = 0; t < totalThreads; ++t) {
+ pthread_join(threadObj[t].threadID, NULL);
+ }
- for (int t = 0; t < totalThreads; ++t) {
- taos_close(threadObj[t].taosCon);
- }
- free(threadObj);
+ for (int t = 0; t < totalThreads; ++t) {
+ taos_close(threadObj[t].taosCon);
+ }
+ free(threadObj);
}
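The import threads above split the file list round-robin: thread t handles every file whose index f satisfies f % totalThreads == t, so no file-list partitioning or locking is needed. A small illustrative sketch of that assignment (names invented, not from the patch):

#include <stdio.h>

/* Round-robin split used by taosDumpInWorkThreadFp(): thread `threadIndex`
 * out of `totalThreads` picks every file index f with
 * f % totalThreads == threadIndex. */
static void listFilesForThread(int threadIndex, int totalThreads, int fileNum) {
    for (int f = 0; f < fileNum; ++f) {
        if (f % totalThreads == threadIndex) {
            printf("thread %d -> file %d\n", threadIndex, f);
        }
    }
}

int main(void) {
    for (int t = 0; t < 3; ++t) {
        listFilesForThread(t, 3, 10);   /* 10 files shared by 3 threads */
    }
    return 0;
}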
+static int taosDumpIn() {
+ assert(g_args.isDumpIn);
+
+ TAOS *taos = NULL;
+ FILE *fp = NULL;
-int taosDumpIn(struct arguments *arguments) {
- assert(arguments->isDumpIn);
+ taos = taos_connect(
+ g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("%s() LN%d, failed to connect to TDengine server\n",
+ __func__, __LINE__);
+ return -1;
+ }
- TAOS *taos = NULL;
- FILE *fp = NULL;
+ taosGetDirectoryFileList(g_args.inpath);
- taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port);
- if (taos == NULL) {
- fprintf(stderr, "failed to connect to TDengine server\n");
- return -1;
- }
+ int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
+ if (g_tsDbSqlFile[0] != 0) {
+ tsSqlFileNumOfTbls--;
- taosGetDirectoryFileList(arguments->inpath);
+ fp = taosOpenDumpInFile(g_tsDbSqlFile);
+ if (NULL == fp) {
+ errorPrint("%s() LN%d, failed to open input file %s\n",
+ __func__, __LINE__, g_tsDbSqlFile);
+ return -1;
+ }
+ fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
- int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
- if (tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ taosLoadFileCharset(fp, g_tsCharset);
- fp = taosOpenDumpInFile(tsDbSqlFile);
- if (NULL == fp) {
- fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
- return -1;
+ taosDumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
+ g_tsDbSqlFile);
}
- fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
- taosLoadFileCharset(fp, tsfCharset);
- taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
- }
+ taos_close(taos);
- if (0 != tsSqlFileNumOfTbls) {
- taosStartDumpInWorkThreads(taos, arguments);
- }
+ if (0 != tsSqlFileNumOfTbls) {
+ taosStartDumpInWorkThreads();
+ }
- taos_close(taos);
- taosFreeSQLFiles();
- return 0;
+ taosFreeDumpFiles();
+ return 0;
}
-
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 189c4b60056ae37667dd0c15aac664c37ebb1b91..beeff372aa75a34c4be1857782a76c2426748140 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1035,6 +1035,20 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
return code;
}
+static uint64_t mnodeCreateSuperTableUid() {
+ int64_t us = taosGetTimestampUs();
+ uint64_t x = (us & ((((uint64_t)1)<<40) - 1));
+ x = x << 24;
+
+ return x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+}
+
+static uint64_t mnodeCreateTableUid(int32_t vgId, int32_t tid) {
+ uint64_t uid = (((uint64_t)vgId) << 48) + ((((uint64_t)tid) & ((1ul << 24) - 1ul)) << 24) +
+ ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ return uid;
+}
+
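The two helpers above pack a 64-bit UID as vgId | tid | sdb-version | random for child tables, while super tables put the low 40 bits of the microsecond timestamp where vgId/tid would go. A hedged, standalone decoder for the child-table layout, with illustrative values only (field labels are mine, not from the code):

#include <stdint.h>
#include <stdio.h>

/* Child-table UID layout built by mnodeCreateTableUid():
 *   bits 48..63  vgId
 *   bits 24..47  tid          (24 bits)
 *   bits  8..23  sdb version  (low 16 bits)
 *   bits  0..7   random       ( 8 bits)                              */
static void decodeTableUid(uint64_t uid) {
    uint32_t vgId    = (uint32_t)(uid >> 48);
    uint32_t tid     = (uint32_t)((uid >> 24) & ((1u << 24) - 1u));
    uint32_t version = (uint32_t)((uid >> 8)  & ((1u << 16) - 1u));
    uint32_t rnd     = (uint32_t)(uid & 0xFFu);
    printf("vgId=%u tid=%u version=%u rand=%u\n", vgId, tid, version, rnd);
}

int main(void) {
    /* example values: vgId=5, tid=77, version=42, rand=9 */
    uint64_t uid = (((uint64_t)5) << 48) +
                   ((((uint64_t)77) & ((1ul << 24) - 1ul)) << 24) +
                   ((42ul & ((1ul << 16) - 1ul)) << 8) +
                   (9ul & ((1ul << 8) - 1ul));
    decodeTableUid(uid);   /* prints vgId=5 tid=77 version=42 rand=9 */
    return 0;
}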
static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
@@ -1058,19 +1072,16 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
}
- SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
+ SSTableObj *pStable = calloc(1, sizeof(SSTableObj));
if (pStable == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
- int64_t us = taosGetTimestampUs();
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
- uint64_t x = (us&0x000000FFFFFFFFFF);
- x = x<<24;
- pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pStable->uid = mnodeCreateSuperTableUid();
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
@@ -1079,7 +1090,8 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
int32_t schemaSize = numOfCols * sizeof(SSchema);
pStable->schema = (SSchema *)calloc(1, schemaSize);
if (pStable->schema == NULL) {
- free(pStable);
+ tfree(pStable->info.tableId);
+ tfree(pStable);
mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
@@ -1096,6 +1108,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (!tIsValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) {
mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
+ tfree(pStable->info.tableId);
+ tfree(pStable->schema);
+ tfree(pStable);
return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG;
}
@@ -2069,18 +2084,13 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
}
pTable->suid = pMsg->pSTable->uid;
- pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) +
- ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid);
pTable->superTable = pMsg->pSTable;
} else {
if (pTable->info.type == TSDB_SUPER_TABLE) {
- int64_t us = taosGetTimestampUs();
- uint64_t x = (us&0x000000FFFFFFFFFF);
- x = x<<24;
- pTable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateSuperTableUid();
} else {
- pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) +
- ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid);
}
pTable->sversion = 0;
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index 891dccaf9779016065013d4f59580026fb98352a..04b1efe7bf78d57a8806960995b0e34ff79e3abb 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -74,13 +74,14 @@ bool taosGetProcMemory(float *memoryUsedMB) {
return false;
}
+ ssize_t _bytes = 0;
size_t len;
char * line = NULL;
while (!feof(fp)) {
tfree(line);
len = 0;
- getline(&line, &len, fp);
- if (line == NULL) {
+ _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
break;
}
if (strstr(line, "VmRSS:") != NULL) {
@@ -113,8 +114,8 @@ static bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) {
size_t len;
char * line = NULL;
- getline(&line, &len, fp);
- if (line == NULL) {
+ ssize_t _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
uError("read file:%s failed", tsSysCpuFile);
fclose(fp);
return false;
@@ -138,8 +139,8 @@ static bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
size_t len = 0;
char * line = NULL;
- getline(&line, &len, fp);
- if (line == NULL) {
+ ssize_t _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
uError("read file:%s failed", tsProcCpuFile);
fclose(fp);
return false;
@@ -339,6 +340,7 @@ static bool taosGetCardInfo(int64_t *bytes) {
return false;
}
+ ssize_t _bytes = 0;
size_t len = 2048;
char * line = calloc(1, len);
@@ -357,7 +359,12 @@ static bool taosGetCardInfo(int64_t *bytes) {
int64_t nouse6 = 0;
char nouse0[200] = {0};
- getline(&line, &len, fp);
+ _bytes = getline(&line, &len, fp);
+ if (_bytes < 0)
+ {
+ break;
+ }
+
line[len - 1] = 0;
if (strstr(line, "lo:") != NULL) {
@@ -420,6 +427,7 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
return false;
}
+ ssize_t _bytes = 0;
size_t len;
char * line = NULL;
char tmp[10];
@@ -428,8 +436,8 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
while (!feof(fp)) {
tfree(line);
len = 0;
- getline(&line, &len, fp);
- if (line == NULL) {
+ _bytes = getline(&line, &len, fp);
+ if ((_bytes < 0) || (line == NULL)) {
break;
}
if (strstr(line, "rchar:") != NULL) {
diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c
index 417513314c7013a3e707999bfbd7f9dbd1a4baa8..b7b268b19e6b6f92babb74cfd3f23793be037cd0 100644
--- a/src/os/src/linux/linuxEnv.c
+++ b/src/os/src/linux/linuxEnv.c
@@ -25,6 +25,13 @@ void osInit() {
strcpy(tsDataDir, "/var/lib/power");
strcpy(tsLogDir, "/var/log/power");
strcpy(tsScriptDir, "/etc/power");
+#elif (_TD_TQ_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/tq");
+ }
+ strcpy(tsDataDir, "/var/lib/tq");
+ strcpy(tsLogDir, "/var/log/tq");
+ strcpy(tsScriptDir, "/etc/tq");
#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index 19351eb7c964a4c2a8a4d1d5d4d1c8ec669908dc..b35cb8f040aec5ff4b4fb12665d0842e72958ba1 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -31,7 +31,14 @@ void osInit() {
strcpy(tsDataDir, "C:/PowerDB/data");
strcpy(tsLogDir, "C:/PowerDB/log");
strcpy(tsScriptDir, "C:/PowerDB/script");
-
+#elif (_TD_TQ_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/TQ/cfg");
+ }
+ strcpy(tsVnodeDir, "C:/TQ/data");
+ strcpy(tsDataDir, "C:/TQ/data");
+ strcpy(tsLogDir, "C:/TQ/log");
+ strcpy(tsScriptDir, "C:/TQ/script");
#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
@@ -48,9 +55,10 @@ void osInit() {
strcpy(tsOsName, "Windows");
const char *tmpDir = getenv("tmp");
- if (tmpDir != NULL) {
+ if (tmpDir == NULL) {
tmpDir = getenv("temp");
}
+
if (tmpDir != NULL) {
strcpy(tsTempDir, tmpDir);
} else {
diff --git a/src/os/tests/CMakeLists.txt b/src/os/tests/CMakeLists.txt
index b00f0ebdc85a24974fd36228ff69ced16a32b76d..17bb9080961f71519520d15e4fea66100fce5693 100644
--- a/src/os/tests/CMakeLists.txt
+++ b/src/os/tests/CMakeLists.txt
@@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
ADD_EXECUTABLE(osTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread)
-ENDIF()
\ No newline at end of file
+ENDIF()
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index df4c4fbc63d469c22ea13273564bec49562f35a9..10300e93670b5e10f56259d51b6ca31df3e90e39 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -297,6 +297,7 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
}
default:
+ fractionLen = 0;
assert(false);
}
@@ -342,6 +343,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
}
default:
+ fractionLen = 0;
assert(false);
}
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 8279c58b24796c734b39e97e9a8e953e0248332f..bc934647ec4827faea4814dfbfb46acb34033668 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -571,6 +571,7 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows);
void* destroyOutputBuf(SSDataBlock* pBlock);
+void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order);
int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput);
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index c6f667475ee7b061fcbc86be7a2aa8fac7282e8e..f9a9992b81aa30b15b042a96af343c9e943bd20b 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -237,18 +237,20 @@ typedef struct tSqlExpr {
uint16_t type; // sql node type
uint32_t tokenId; // TK_LE: less than(binary expr)
- // the whole string of the function(col, param), while the function name is kept in token
- SStrToken operand;
- uint32_t functionId; // function id
+ // the whole string of the function(col, param), while the function name is kept in exprToken
+ struct {
+ SStrToken operand;
+ struct SArray *paramList; // function parameters list
+ } Expr;
- SStrToken colInfo; // table column info
+ uint32_t functionId; // function id, todo remove it
+ SStrToken columnName; // table column info
tVariant value; // the use input value
- SStrToken token; // original sql expr string
- uint32_t flags;
+ SStrToken exprToken; // original sql expr string
+ uint32_t flags; // todo remove it
struct tSqlExpr *pLeft; // left child
struct tSqlExpr *pRight; // right child
- struct SArray *pParam; // function parameters list
} tSqlExpr;
// used in select clause. select from xxx
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index ce2c3e361654ebe67310e6b71edc4ea6506d64ed..5b6b930e85b6fc2eb6dcfa483ef190d48bed23f9 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -681,7 +681,7 @@ where_opt(A) ::= WHERE expr(X). {A = X;}
%type expr {tSqlExpr*}
%destructor expr {tSqlExprDestroy($$);}
-expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->token.z = X.z; A->token.n = (Z.z - X.z + 1);}
+expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->exprToken.z = X.z; A->exprToken.n = (Z.z - X.z + 1);}
expr(A) ::= ID(X). { A = tSqlExprCreateIdValue(&X, TK_ID);}
expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(&X, TK_ID);}
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index ef1408ab28d322b3392e53b67321c76b9edaaaf9..96dd2e1de0e6833ae2e0b727299d3ca8a16218e6 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -1575,7 +1575,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
memcpy(pCtx->pOutput, data, pCtx->inputBytes);
- TSKEY ts = GET_TS_DATA(pCtx, i);
+ TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->hasResult = DATA_SET_FLAG;
@@ -1590,7 +1590,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
continue;
}
- TSKEY ts = GET_TS_DATA(pCtx, i);
+ TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 5989bd810bdfac990cdd5fbe23775e0b8eed017b..ef88f8bc0641b50438fd4e72e335ccb7828e124e 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -179,7 +179,6 @@ static STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* w
static STableIdInfo createTableIdInfo(STableQueryInfo* pTableQueryInfo);
static void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInfo* pDownstream);
-static void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr);
@@ -237,37 +236,40 @@ static int compareRowData(const void *a, const void *b, const void *userData) {
static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, SSDataBlock* pDataBlock) {
SArray *columnOrderList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
- if (taosArrayGetSize(columnOrderList) <= 0) {
+ size_t size = taosArrayGetSize(columnOrderList);
+ taosArrayDestroy(columnOrderList);
+
+ if (size <= 0) {
return;
}
int32_t orderId = pRuntimeEnv->pQueryAttr->order.orderColId;
if (orderId <= 0) {
return;
- }
+ }
+
bool found = false;
int16_t dataOffset = 0;
- //SColIndex *index = taosArrayGet(columnOrderList, 0);
for (int32_t j = 0; j < pDataBlock->info.numOfCols; ++j) {
SColumnInfoData* pColInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock->pDataBlock, j);
if (orderId == j) {
found = true;
break;
}
+
dataOffset += pColInfoData->info.bytes;
}
if (found == false) {
return;
}
+
int16_t type = pRuntimeEnv->pQueryAttr->pExpr1[orderId].base.resType;
SRowCompSupporter support = {.pRuntimeEnv = pRuntimeEnv, .dataOffset = dataOffset, .comFunc = getComparFunc(type, 0)};
-
taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support);
- return;
-
}
+
//setup the output buffer for each operator
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) {
const static int32_t minSize = 8;
@@ -1405,8 +1407,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
}
doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
- tfree(pInfo->prevData);
}
+
+ tfree(pInfo->prevData);
}
static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
@@ -1740,8 +1743,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
}
for(int32_t i = 1; i < numOfOutput; ++i) {
- (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) +
- pExpr[i - 1].base.interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+ (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + pExpr[i - 1].base.interBytes);
}
setCtxTagColumnInfo(pFuncCtx, numOfOutput);
@@ -1777,9 +1779,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
+
pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen);
- pRuntimeEnv->currentOffset = pQueryAttr->limit.offset;
// NOTE: pTableCheckInfo need to update the query time range and the lastKey info
pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
@@ -1798,7 +1800,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQueryAttr->tableCols[i-1].bytes;
}
- *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN;
+ if (pQueryAttr->tableCols[0].type == TSDB_DATA_TYPE_TIMESTAMP) {
+ *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN;
+ }
}
qDebug("QInfo:0x%"PRIx64" init runtime environment completed", GET_QID(pRuntimeEnv));
@@ -1832,7 +1836,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_Groupby: {
pRuntimeEnv->proot =
createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
- setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+
+ int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+ if (opType != OP_DummyInput) {
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ }
break;
}
case OP_SessionWindow: {
@@ -2683,10 +2691,6 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData
}
void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock) {
- if (numOfFilterCols > 0 && pFilterInfo[0].pData != NULL) {
- return;
- }
-
// set the initial static data value filter expression
for (int32_t i = 0; i < numOfFilterCols; ++i) {
for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) {
@@ -3713,6 +3717,9 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
}
int32_t numOfRowsToCopy = pRow->numOfRows;
+ if (numOfResult + numOfRowsToCopy >= pRuntimeEnv->resultInfo.capacity) {
+ break;
+ }
pGroupResInfo->index += 1;
@@ -3847,14 +3854,17 @@ int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutp
}
void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType) {
- SQueryProfEvent event;
- event.eventType = eventType;
- event.eventTime = taosGetTimestampUs();
+ SQueryProfEvent event = {0};
+
+ event.eventType = eventType;
+ event.eventTime = taosGetTimestampUs();
event.operatorType = operatorInfo->operatorType;
- SQInfo* qInfo = operatorInfo->pRuntimeEnv->qinfo;
- if (qInfo->summary.queryProfEvents) {
- taosArrayPush(qInfo->summary.queryProfEvents, &event);
+ if (operatorInfo->pRuntimeEnv) {
+ SQInfo* pQInfo = operatorInfo->pRuntimeEnv->qinfo;
+ if (pQInfo->summary.queryProfEvents) {
+ taosArrayPush(pQInfo->summary.queryProfEvents, &event);
+ }
}
}
@@ -6100,7 +6110,14 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo));
pInfo->colIndex = -1; // group by column index
+
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
+
+ SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
+
+ pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize *
+ (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
+
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
@@ -7563,6 +7580,7 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
pRuntimeEnv->prevResult = prevResult;
}
+ pRuntimeEnv->currentOffset = pQueryAttr->limit.offset;
if (tsdb != NULL) {
pQueryAttr->precision = tsdbGetCfg(tsdb)->precision;
}
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index 9c06a87b81c595a01f683c17c87b0418a09a5098..e724b0418c5fe5e9a34459e09cf37c535d3236f2 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -127,7 +127,8 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
SColumn* pCol = taosArrayGetP(tableCols, i);
SColumnIndex index = {.tableIndex = 0, .columnIndex = pCol->columnIndex};
- SExprInfo* p = tscExprCreate(pQueryInfo, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes,
+ STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes,
pCol->info.colId, 0, TSDB_COL_NORMAL);
strncpy(p->base.aliasName, pSchema[pCol->columnIndex].name, tListLen(p->base.aliasName));
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index a2cb7aee00c8bd8173557a5bf19e8e337290c25c..fd957fdcce133cd636617ad0bf924ad3ea26d775 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -124,7 +124,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
tSqlExpr *pSqlExpr = calloc(1, sizeof(tSqlExpr));
if (pToken != NULL) {
- pSqlExpr->token = *pToken;
+ pSqlExpr->exprToken = *pToken;
}
if (optrType == TK_NULL) {
@@ -161,7 +161,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
// Here it must be the column name (tk_id) if it is not a number or string.
assert(optrType == TK_ID || optrType == TK_ALL);
if (pToken != NULL) {
- pSqlExpr->colInfo = *pToken;
+ pSqlExpr->columnName = *pToken;
}
pSqlExpr->tokenId = optrType;
@@ -180,17 +180,17 @@ tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToke
return NULL;
}
- tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
- pExpr->tokenId = optType;
- pExpr->type = SQL_NODE_SQLFUNCTION;
- pExpr->pParam = pParam;
+ tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
+ pExpr->tokenId = optType;
+ pExpr->type = SQL_NODE_SQLFUNCTION;
+ pExpr->Expr.paramList = pParam;
int32_t len = (int32_t)((endToken->z + endToken->n) - pFuncToken->z);
- pExpr->operand = (*pFuncToken);
+ pExpr->Expr.operand = (*pFuncToken);
- pExpr->token.n = len;
- pExpr->token.z = pFuncToken->z;
- pExpr->token.type = pFuncToken->type;
+ pExpr->exprToken.n = len;
+ pExpr->exprToken.z = pFuncToken->z;
+ pExpr->exprToken.type = pFuncToken->type;
return pExpr;
}
@@ -204,16 +204,16 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
pExpr->type = SQL_NODE_EXPR;
if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) {
- char* endPos = pRight->token.z + pRight->token.n;
- pExpr->token.z = pLeft->token.z;
- pExpr->token.n = (uint32_t)(endPos - pExpr->token.z);
- pExpr->token.type = pLeft->token.type;
+ char* endPos = pRight->exprToken.z + pRight->exprToken.n;
+ pExpr->exprToken.z = pLeft->exprToken.z;
+ pExpr->exprToken.n = (uint32_t)(endPos - pExpr->exprToken.z);
+ pExpr->exprToken.type = pLeft->exprToken.type;
}
if ((pLeft != NULL && pRight != NULL) &&
(optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) {
/*
- * if a token is noted as the TK_TIMESTAMP, the time precision is microsecond
+ * if a exprToken is noted as the TK_TIMESTAMP, the time precision is microsecond
* Otherwise, the time precision is adaptive, determined by the time precision from databases.
*/
if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) ||
@@ -304,7 +304,7 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
tSqlExpr *pRSub = calloc(1, sizeof(tSqlExpr));
pRSub->tokenId = TK_SET; // TODO refactor .....
- pRSub->pParam = (SArray *)pRight;
+ pRSub->Expr.paramList = (SArray *)pRight;
pExpr->pRight = pRSub;
} else {
@@ -346,8 +346,8 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
|| (left->pLeft == NULL && right->pLeft)
|| (left->pRight && right->pRight == NULL)
|| (left->pRight == NULL && right->pRight)
- || (left->pParam && right->pParam == NULL)
- || (left->pParam == NULL && right->pParam)) {
+ || (left->Expr.paramList && right->Expr.paramList == NULL)
+ || (left->Expr.paramList == NULL && right->Expr.paramList)) {
return 1;
}
@@ -355,20 +355,20 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
return 1;
}
- if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
+ if (tStrTokenCompare(&left->columnName, &right->columnName)) {
return 1;
}
- if (right->pParam && left->pParam) {
- size_t size = taosArrayGetSize(right->pParam);
- if (left->pParam && taosArrayGetSize(left->pParam) != size) {
+ if (right->Expr.paramList && left->Expr.paramList) {
+ size_t size = taosArrayGetSize(right->Expr.paramList);
+ if (left->Expr.paramList && taosArrayGetSize(left->Expr.paramList) != size) {
return 1;
}
for (int32_t i = 0; i < size; i++) {
- tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
+ tSqlExprItem* pLeftElem = taosArrayGet(left->Expr.paramList, i);
tSqlExpr* pSubLeft = pLeftElem->pNode;
- tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
+ tSqlExprItem* pRightElem = taosArrayGet(right->Expr.paramList, i);
tSqlExpr* pSubRight = pRightElem->pNode;
if (tSqlExprCompare(pSubLeft, pSubRight)) {
@@ -401,8 +401,8 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
pExpr->pRight = tSqlExprClone(pSrc->pRight);
}
- //we don't clone pParam now because clone is only used for between/and
- assert(pSrc->pParam == NULL);
+ //we don't clone paramList now because clone is only used for between/and
+ assert(pSrc->Expr.paramList == NULL);
return pExpr;
}
@@ -460,7 +460,7 @@ static void doDestroySqlExprNode(tSqlExpr *pExpr) {
tVariantDestroy(&pExpr->value);
}
- tSqlExprListDestroy(pExpr->pParam);
+ tSqlExprListDestroy(pExpr->Expr.paramList);
free(pExpr);
}
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index d3f478ebeb5fefe8bba2df859028dc8b97e66527..8fe3538a93c182b0f766a9542212e10d1ff4dd04 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -2921,7 +2921,7 @@ static void yy_reduce(
{yymsp[-3].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[-2].minor.yy369;}
break;
case 223: /* expr ::= LP expr RP */
-{yylhsminor.yy166 = yymsp[-1].minor.yy166; yylhsminor.yy166->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy166->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
+{yylhsminor.yy166 = yymsp[-1].minor.yy166; yylhsminor.yy166->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy166->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
yymsp[-2].minor.yy166 = yylhsminor.yy166;
break;
case 224: /* expr ::= ID */
diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt
index 0cfe2ff1659027561799145e8ef8ed514988416c..6169c5666e239c9a6fec3c961849d2a2abd5fa1a 100644
--- a/src/query/tests/CMakeLists.txt
+++ b/src/query/tests/CMakeLists.txt
@@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index 605f7d2a326f9b2a66865d9ad0edc2987eb57f81..e958a8e5ec5b6542d609028ee052d21a9a84d397 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -1189,7 +1189,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte
}
rpcSendReqToServer(pRpc, pContext);
rpcFreeCont(rpcMsg.pCont);
- } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY) {
+ } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY || pHead->code == TSDB_CODE_DND_EXITING) {
pContext->code = pHead->code;
rpcProcessConnError(pContext, NULL);
rpcFreeCont(rpcMsg.pCont);
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index 54372ae8c28d91a72243256a74a8fb53c317eab2..e53d2826c76acb057020b05bfeba4e22cf128c51 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -771,7 +771,7 @@ int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) {
int64_t maxBufSize = 0;
SMFInfo minfo;
- taosHashEmpty(pfs->metaCache);
+ taosHashClear(pfs->metaCache);
// No meta file, just return
if (pfs->cstatus->pmf == NULL) return 0;
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index fc152231def6dde807bc9b0093a3b76f5bffff7f..8266a7c20fd8286afa11e4c3ddbbeed51b503ee6 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -128,6 +128,8 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) {
tsdbSyncCommit(repo);
}
+ tsem_wait(&(pRepo->readyToCommit));
+
tsdbUnRefMemTable(pRepo, pRepo->mem);
tsdbUnRefMemTable(pRepo, pRepo->imem);
pRepo->mem = NULL;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 2d3e7b9e8b63c48ae7da1ba8340d8ceb3abdd87f..22162da5c6624066cdc3bac01054ac5a44d4e1f7 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -364,7 +364,7 @@ static int64_t getEarliestValidTimestamp(STsdbRepo* pTsdb) {
STsdbCfg* pCfg = &pTsdb->config;
int64_t now = taosGetTimestamp(pCfg->precision);
- return now - (tsTickPerDay[pCfg->precision] * pCfg->keep);
+ return now - (tsTickPerDay[pCfg->precision] * pCfg->keep) + 1; // needs to add one tick
}
static void setQueryTimewindow(STsdbQueryHandle* pQueryHandle, STsdbQueryCond* pCond) {
@@ -488,6 +488,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &psTable);
if (pQueryHandle->pTableCheckInfo == NULL) {
tsdbCleanupQueryHandle(pQueryHandle);
+ taosArrayDestroy(psTable);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
@@ -575,8 +576,6 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
-
-
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
pCond->twindow = updateLastrowForEachGroup(groupList);
@@ -2957,6 +2956,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
SArray* psTable = NULL;
pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pCurrent, pSecQueryHandle->window.skey, &psTable);
if (pSecQueryHandle->pTableCheckInfo == NULL) {
+ taosArrayDestroy(psTable);
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto out_of_memory;
}
diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h
index c37426069cf64bb35ac5e1100d49e8103c851625..616b844c1388575130a2b1c02033cfedb7ef9e57 100644
--- a/src/util/inc/hash.h
+++ b/src/util/inc/hash.h
@@ -140,7 +140,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param);
-void taosHashEmpty(SHashObj *pHashObj);
+void taosHashClear(SHashObj *pHashObj);
/**
* clean up hash table
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 5b3218382fff38382a536c8ece077fd2f350adf2..d7bee9b67cad8fe91a182d76a443c04fd82be44c 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -144,6 +144,14 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashObj *pHashObj, SHashEntry*
*/
static void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode);
+/**
+ * Check whether the hash table is empty or not.
+ *
+ * @param pHashObj the hash table object
+ * @return if the hash table is empty or not
+ */
+static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj);
+
/**
* Get the next element in hash table for iterator
* @param pIter
@@ -195,7 +203,16 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp) {
}
}
-int32_t taosHashGetSize(const SHashObj *pHashObj) { return (int32_t)((pHashObj == NULL) ? 0 : pHashObj->size); }
+int32_t taosHashGetSize(const SHashObj *pHashObj) {
+ if (!pHashObj) {
+ return 0;
+ }
+ return (int32_t)atomic_load_64(&pHashObj->size);
+}
+
+static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) {
+ return taosHashGetSize(pHashObj) == 0;
+}
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) {
uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
@@ -281,7 +298,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) {
- if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) {
+ if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) {
return NULL;
}
@@ -338,7 +355,7 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) {
- if (pHashObj == NULL || pHashObj->size <= 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return -1;
}
@@ -405,7 +422,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
}
int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param) {
- if (pHashObj == NULL || pHashObj->size == 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return 0;
}
@@ -478,7 +495,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi
return 0;
}
-void taosHashEmpty(SHashObj *pHashObj) {
+void taosHashClear(SHashObj *pHashObj) {
if (pHashObj == NULL) {
return;
}
@@ -517,7 +534,7 @@ void taosHashCleanup(SHashObj *pHashObj) {
return;
}
- taosHashEmpty(pHashObj);
+ taosHashClear(pHashObj);
tfree(pHashObj->hashList);
// destroy mem block
@@ -535,7 +552,7 @@ void taosHashCleanup(SHashObj *pHashObj) {
// for profile only
int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) {
- if (pHashObj == NULL || pHashObj->size == 0) {
+ if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) {
return 0;
}
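The hash.c changes above route every emptiness test through taosHashTableEmpty() and make taosHashGetSize() read the size with atomic_load_64, so readers cannot observe a torn or half-updated count while writers modify it. A rough equivalent using standard C11 atomics (type and function names here are invented for illustration; the patch uses TDengine's own atomic helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    _Atomic int64_t size;
    /* buckets, locks, etc. omitted */
} DemoHash;

static int32_t demoHashGetSize(DemoHash *h) {
    if (h == NULL) return 0;
    return (int32_t)atomic_load(&h->size);   /* analogous to atomic_load_64 */
}

static bool demoHashEmpty(DemoHash *h) {
    return demoHashGetSize(h) == 0;
}

int main(void) {
    DemoHash h = { .size = 0 };
    return demoHashEmpty(&h) ? 0 : 1;   /* empty table -> exit code 0 */
}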
diff --git a/src/util/src/tbuffer.c b/src/util/src/tbuffer.c
index abfa35f42cc9798d3a8a7dd7d6b874705ace88ba..c06d1e59bd537bc009b462f0b17b9cdd00321d20 100644
--- a/src/util/src/tbuffer.c
+++ b/src/util/src/tbuffer.c
@@ -18,6 +18,24 @@
#include "exception.h"
#include "taoserror.h"
+typedef union Un4B {
+ uint32_t ui;
+ float f;
+} Un4B;
+#if __STDC_VERSION__ >= 201112L
+static_assert(sizeof(Un4B) == sizeof(uint32_t), "sizeof(Un4B) must equal to sizeof(uint32_t)");
+static_assert(sizeof(Un4B) == sizeof(float), "sizeof(Un4B) must equal to sizeof(float)");
+#endif
+
+typedef union Un8B {
+ uint64_t ull;
+ double d;
+} Un8B;
+#if __STDC_VERSION__ >= 201112L
+static_assert(sizeof(Un8B) == sizeof(uint64_t), "sizeof(Un8B) must equal to sizeof(uint64_t)");
+static_assert(sizeof(Un8B) == sizeof(double), "sizeof(Un8B) must equal to sizeof(double)");
+#endif
+
////////////////////////////////////////////////////////////////////////////////
// reader functions
@@ -175,13 +193,21 @@ uint64_t tbufReadUint64( SBufferReader* buf ) {
}
float tbufReadFloat( SBufferReader* buf ) {
- uint32_t ret = tbufReadUint32( buf );
- return *(float*)( &ret );
+ Un4B _un;
+ tbufReadToBuffer( buf, &_un, sizeof(_un) );
+ if( buf->endian ) {
+ _un.ui = ntohl( _un.ui );
+ }
+ return _un.f;
}
double tbufReadDouble(SBufferReader* buf) {
- uint64_t ret = tbufReadUint64( buf );
- return *(double*)( &ret );
+ Un8B _un;
+ tbufReadToBuffer( buf, &_un, sizeof(_un) );
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ return _un.d;
}
////////////////////////////////////////////////////////////////////////////////
@@ -381,17 +407,37 @@ void tbufWriteUint64At( SBufferWriter* buf, size_t pos, uint64_t data ) {
}
void tbufWriteFloat( SBufferWriter* buf, float data ) {
- tbufWriteUint32( buf, *(uint32_t*)(&data) );
+ Un4B _un;
+ _un.f = data;
+ if( buf->endian ) {
+ _un.ui = htonl( _un.ui );
+ }
+ tbufWrite( buf, &_un, sizeof(_un) );
}
void tbufWriteFloatAt( SBufferWriter* buf, size_t pos, float data ) {
- tbufWriteUint32At( buf, pos, *(uint32_t*)(&data) );
+ Un4B _un;
+ _un.f = data;
+ if( buf->endian ) {
+ _un.ui = htonl( _un.ui );
+ }
+ tbufWriteAt( buf, pos, &_un, sizeof(_un) );
}
void tbufWriteDouble( SBufferWriter* buf, double data ) {
- tbufWriteUint64( buf, *(uint64_t*)(&data) );
+ Un8B _un;
+ _un.d = data;
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ tbufWrite( buf, &_un, sizeof(_un) );
}
void tbufWriteDoubleAt( SBufferWriter* buf, size_t pos, double data ) {
- tbufWriteUint64At( buf, pos, *(uint64_t*)(&data) );
+ Un8B _un;
+ _un.d = data;
+ if( buf->endian ) {
+ _un.ull = htobe64( _un.ull );
+ }
+ tbufWriteAt( buf, pos, &_un, sizeof(_un) );
}
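tbuffer.c now moves float and double values through explicit unions so the bytes can be swapped for big-endian buffers without the old pointer-cast type punning. A standalone sketch of the same round trip for a float, assuming POSIX htonl/ntohl (names and buffer are illustrative):

#include <arpa/inet.h>   /* htonl, ntohl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef union {
    uint32_t ui;
    float    f;
} F32Bits;

int main(void) {
    float in = 3.14f;

    F32Bits w;                 /* write side: float -> big-endian bytes */
    w.f  = in;
    w.ui = htonl(w.ui);

    unsigned char buf[4];
    memcpy(buf, &w, sizeof(w));

    F32Bits r;                 /* read side: big-endian bytes -> float */
    memcpy(&r, buf, sizeof(r));
    r.ui = ntohl(r.ui);

    printf("round trip: %f\n", r.f);   /* prints 3.140000 */
    return 0;
}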
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 442e83bb4f76499d7ce39792fc188d61536910c2..80071986d6d2396ef2aec8f7841b0897cb3d7b26 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -312,6 +312,9 @@ void taosReadGlobalLogCfg() {
#ifdef _TD_POWER_
printf("configDir:%s not there, use default value: /etc/power", configDir);
strcpy(configDir, "/etc/power");
+ #elif (_TD_TQ_ == true)
+ printf("configDir:%s not there, use default value: /etc/tq", configDir);
+ strcpy(configDir, "/etc/tq");
#else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
@@ -327,7 +330,8 @@ void taosReadGlobalLogCfg() {
printf("\nconfig file:%s not found, all variables are set to default\n", fileName);
return;
}
-
+
+ ssize_t _bytes = 0;
size_t len = 1024;
line = calloc(1, len);
@@ -337,7 +341,12 @@ void taosReadGlobalLogCfg() {
option = value = NULL;
olen = vlen = 0;
- tgetline(&line, &len, fp);
+ _bytes = tgetline(&line, &len, fp);
+ if (_bytes < 0)
+ {
+ break;
+ }
+
line[len - 1] = 0;
paGetToken(line, &option, &olen);
@@ -373,7 +382,8 @@ bool taosReadGlobalCfg() {
return false;
}
}
-
+
+ ssize_t _bytes = 0;
size_t len = 1024;
line = calloc(1, len);
@@ -383,7 +393,12 @@ bool taosReadGlobalCfg() {
option = value = value2 = value3 = NULL;
olen = vlen = vlen2 = vlen3 = 0;
- tgetline(&line, &len, fp);
+ _bytes = tgetline(&line, &len, fp);
+ if (_bytes < 0)
+ {
+ break;
+ }
+
line[len - 1] = 0;
paGetToken(line, &option, &olen);
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index 1d37a6e9a4b740d2cfabb18bd3d2252fa6bfd034..27a08d8e9e614db628edc843d006530bd5503617 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -207,6 +207,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, "No permission for dis
TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message length")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories")
+TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING, "Dnode is exiting")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in progress")
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index 7f127fc396a13f0a7796dcb4ce1dd63ce96cb951..45ff14ffa4adcd018cbf7a7d69b8644582855ab3 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -83,8 +83,10 @@ int64_t dbgWSize = 0;
#ifdef _TD_POWER_
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
+#elif (_TD_TQ_ == true)
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq";
#else
-char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/taos";
+char tsLogDir[PATH_MAX] = "/var/log/taos";
#endif
static SLogObj tsLogObj = { .fileNum = 1 };
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index bda52936f90d07b1fde598de9ca683c8a1b8b82a..1a73991ade1ea4617fc4d3dab3904652ff46d691 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -427,13 +427,23 @@ char *taosIpStr(uint32_t ipInt) {
}
FORCE_INLINE float taos_align_get_float(const char* pBuf) {
- float fv = 0;
- *(int32_t*)(&fv) = *(int32_t*)pBuf;
+#if __STDC_VERSION__ >= 201112L
+ static_assert(sizeof(float) == sizeof(uint32_t), "sizeof(float) must equal to sizeof(uint32_t)");
+#else
+ assert(sizeof(float) == sizeof(uint32_t));
+#endif
+ float fv = 0;
+ memcpy(&fv, pBuf, sizeof(fv)); // in ARM, return *((const float*)(pBuf)) may cause problem
return fv;
}
FORCE_INLINE double taos_align_get_double(const char* pBuf) {
- double dv = 0;
- *(int64_t*)(&dv) = *(int64_t*)pBuf;
+#if __STDC_VERSION__ >= 201112L
+ static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) must equal to sizeof(uint64_t)");
+#else
+ assert(sizeof(double) == sizeof(uint64_t));
+#endif
+ double dv = 0;
+ memcpy(&dv, pBuf, sizeof(dv)); // in ARM, return *((const double*)(pBuf)) may cause problem
return dv;
}
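The memcpy-based accessors above avoid dereferencing a pointer that may not be suitably aligned, which can fault or produce wrong results on some ARM targets. A tiny sketch of reading a float from an odd offset safely (the buffer layout is invented for illustration):

#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned char buf[1 + sizeof(float)];   /* float starts at odd offset 1 */
    float src = 2.5f;
    memcpy(buf + 1, &src, sizeof(src));

    float dst = 0;
    memcpy(&dst, buf + 1, sizeof(dst));     /* safe regardless of alignment */
    printf("%f\n", dst);                    /* prints 2.500000 */
    return 0;
}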
diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt
index ee99348cd9db86923f2ba06da9b3452d2dcc0347..69108f6fa67154ce79673b94f47ac19b645bd985 100644
--- a/src/util/tests/CMakeLists.txt
+++ b/src/util/tests/CMakeLists.txt
@@ -2,14 +2,15 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
-
+
LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c)
ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov)
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index d770a38e371c9920c438d810f699ab399be15833..ef05cf4a4063625d8e2810503e541fd32a7f8f62 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -41,6 +41,8 @@ typedef struct {
int32_t queuedWMsg;
int32_t queuedRMsg;
int32_t flowctrlLevel;
+ int8_t preClose; // drop and close switch
+ int8_t reserved[3];
int64_t sequence; // for topic
int8_t status;
int8_t role;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 074f0f681faaa89ccf7a14da0da43774a76d647b..979e4e4cddbd3fe4d0b349fb3c2c168d0cd67e89 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -93,7 +93,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
}
int32_t vnodeSync(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vDebug("vgId:%d, failed to sync, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
@@ -116,7 +116,7 @@ int32_t vnodeSync(int32_t vgId) {
int32_t vnodeDrop(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) {
vDebug("vgId:%d, failed to drop, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
@@ -439,15 +439,16 @@ int32_t vnodeOpen(int32_t vgId) {
}
int32_t vnodeClose(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) return 0;
if (pVnode->dropped) {
vnodeRelease(pVnode);
return 0;
}
+ pVnode->preClose = 1;
+
vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode);
- vnodeRemoveFromHash(pVnode);
vnodeRelease(pVnode);
vnodeCleanUp(pVnode);
diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c
index 5a0bafe82301d568da20cd45b0daeeb37995127d..8b17d3a5f2b8871aa83d4daf81ff936773de736a 100644
--- a/src/vnode/src/vnodeMgmt.c
+++ b/src/vnode/src/vnodeMgmt.c
@@ -125,6 +125,18 @@ void vnodeRelease(void *vparam) {
}
}
+void *vnodeAcquireNotClose(int32_t vgId) {
+ SVnodeObj *pVnode = vnodeAcquire(vgId);
+ if (pVnode != NULL && pVnode->preClose == 1) {
+ vnodeRelease(pVnode);
+ terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
+ vDebug("vgId:%d, not exist, pre closing", vgId);
+ return NULL;
+ }
+
+ return pVnode;
+}
+
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) {
int64_t totalStorage = 0;
int64_t compStorage = 0;
@@ -188,7 +200,7 @@ void vnodeBuildStatusMsg(void *param) {
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId);
- SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId);
+ SVnodeObj *pVnode = vnodeAcquireNotClose(pAccess[i].vgId);
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index 555eda6d13eeb1dbbb83fbd89ee2672966aa8539..a7c418711de5bae2e1e98c90a72a1b2a9aa06d6f 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -396,10 +396,13 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
}
void vnodeWaitWriteCompleted(SVnodeObj *pVnode) {
+ int32_t extraSleep = 0;
while (pVnode->queuedWMsg > 0) {
vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg);
taosMsleep(10);
+ extraSleep = 1;
}
- taosMsleep(900);
+ if (extraSleep)
+ taosMsleep(900);
}
diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile
index a93b412ab90658231bb9a33783e8984ac0883901..c75427b5f4e568553dbcd9e2686f529a2745c029 100644
--- a/tests/Jenkinsfile
+++ b/tests/Jenkinsfile
@@ -114,14 +114,6 @@ pipeline {
java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
'''
}
- catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- sh '''
- cd ${WKC}/src/connector/jdbc
- mvn clean package -Dmaven.test.skip=true >/dev/null
- cd ${WKC}/tests/examples/JDBC/JDBCDemo/
- java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
- '''
- }
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml
index 84467003f905c454bb285088e56bcd26465a3e5e..045e9d336c048c990049e76623fa2427d3f1c911 100644
--- a/tests/examples/JDBC/connectionPools/pom.xml
+++ b/tests/examples/JDBC/connectionPools/pom.xml
@@ -9,12 +9,12 @@
1.0-SNAPSHOT
+
com.taosdata.jdbc
taos-jdbcdriver
2.0.18
-
com.alibaba
@@ -50,6 +50,12 @@
log4j
1.2.17
+
+        <dependency>
+            <groupId>com.cloudhopper.proxool</groupId>
+            <artifactId>proxool</artifactId>
+            <version>0.9.1</version>
+        </dependency>
@@ -57,25 +63,46 @@
org.apache.maven.plugins
maven-assembly-plugin
- 3.1.0
-
-
-
- com.taosdata.example.ConnectionPoolDemo
-
-
-
- jar-with-dependencies
-
-
+ 3.3.0
- make-assembly
+ ConnectionPoolDemo
+
+ ConnectionPoolDemo
+
+
+ com.taosdata.example.ConnectionPoolDemo
+
+
+
+ jar-with-dependencies
+
+
package
single
+
+
+ ProxoolDemo
+
+ ProxoolDemo
+
+
+ com.taosdata.example.ProxoolDemo
+
+
+
+ jar-with-dependencies
+
+
+ package
+
+ single
+
+
+
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java
new file mode 100644
index 0000000000000000000000000000000000000000..632ad8c9bf69d13d137d06c1f23c964904c8e050
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java
@@ -0,0 +1,56 @@
+package com.taosdata.example;
+
+import org.logicalcobwebs.proxool.ProxoolException;
+import org.logicalcobwebs.proxool.configuration.JAXPConfigurator;
+
+import java.sql.*;
+
+public class ProxoolDemo {
+
+
+ public static void main(String[] args) {
+
+ String xml = parseConfigurationXml(args);
+ if (xml == null) {
+ printHelp();
+ System.exit(0);
+ }
+
+ try {
+ JAXPConfigurator.configure(xml, false);
+ Class.forName("org.logicalcobwebs.proxool.ProxoolDriver");
+ Connection connection = DriverManager.getConnection("proxool.ds");
+
+ Statement stmt = connection.createStatement();
+
+ ResultSet rs = stmt.executeQuery("show databases");
+ ResultSetMetaData metaData = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ System.out.print(metaData.getColumnLabel(i) + ": " + rs.getString(i));
+ }
+ System.out.println();
+ }
+
+ stmt.close();
+
+ } catch (ClassNotFoundException | SQLException | ProxoolException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private static String parseConfigurationXml(String[] args) {
+ String host = null;
+ for (int i = 0; i < args.length; i++) {
+ if ("--xml".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+ host = args[++i];
+ }
+ }
+ return host;
+ }
+
+ private static void printHelp() {
+ System.out.println("Usage: java -jar ProxoolDemo.jar --xml [xml]");
+ }
+
+}
diff --git a/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml
new file mode 100644
index 0000000000000000000000000000000000000000..67baa1c3931aa57591af8fc306ed441328606978
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml
@@ -0,0 +1,27 @@
+
+
+
+ ds
+
+ jdbc:TAOS-RS://127.0.0.1:6041/log
+
+ com.taosdata.jdbc.rs.RestfulDriver
+
+
+
+
+
+
+ 100
+
+ 100
+
+ 1
+
+ 5
+
+ 30000
+
+ select server_status()
+
+
\ No newline at end of file
diff --git a/tests/pytest/dbmgmt/nanoSecondCheck.py b/tests/pytest/dbmgmt/nanoSecondCheck.py
index 27050a2213f7e6bddeb5cc6135c7fe4760018f61..a5e9adacee53a9172a2d8990ccc4d83feb983bdd 100644
--- a/tests/pytest/dbmgmt/nanoSecondCheck.py
+++ b/tests/pytest/dbmgmt/nanoSecondCheck.py
@@ -99,6 +99,15 @@ class TDTestCase:
tdSql.query('select avg(speed) from tb interval(100000000b)')
tdSql.checkRows(4)
+ tdSql.error('select avg(speed) from tb interval(1b);')
+ tdSql.error('select avg(speed) from tb interval(999b);')
+
+ tdSql.query('select avg(speed) from tb interval(1000b);')
+ tdSql.checkRows(5)
+
+ tdSql.query('select avg(speed) from tb interval(1u);')
+ tdSql.checkRows(5)
+
tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);')
tdSql.checkRows(4)
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 17d0bbe190237849a461de04b6286763ea313646..2cbc3747f6b15862891ea34c816654ac92e9f390 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -238,6 +238,7 @@ python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
# python3 ./test.py -f query/nestedQuery/queryInterval.py
python3 ./test.py -f query/queryStateWindow.py
+python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
#stream
diff --git a/tests/pytest/insert/retentionpolicy.py b/tests/pytest/insert/retentionpolicy.py
index af933aafab13eabd930810034994f92fd1818167..607ee26a59969f1ecafd4160e4f9db58e3af568a 100644
--- a/tests/pytest/insert/retentionpolicy.py
+++ b/tests/pytest/insert/retentionpolicy.py
@@ -117,6 +117,7 @@ class TDTestRetetion:
self.checkRows(4,cmd)
while datetime.datetime.now() <= (ttime + datetime.timedelta(hours=72)):
time.sleep(0.001)
+ time.sleep(0.01)
cmd = 'select * from test'
self.queryRows=tdSql.query(cmd)
print(tdSql.queryResult)
diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
new file mode 100644
index 0000000000000000000000000000000000000000..26eda1120b6026655add2bcf6c601bf8dd22c54a
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
@@ -0,0 +1,79 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.ts = 1593548685000
+        self.tables = 10
+        self.rowsPerTable = 100
+
+
+    def run(self):
+        # tdSql.execute("drop database db ")
+        tdSql.prepare()
+        tdSql.execute("create table st (ts timestamp, num int, value int) tags (loc nchar(30))")
+        for i in range(self.tables):
+            for j in range(self.rowsPerTable):
+                args1 = (i, i, self.ts + i * self.rowsPerTable + j * 10000, i, random.randint(1, 100))
+                tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d)" % args1)
+
+        tdSql.query("select * from (select * from st)")
+        tdSql.checkRows(self.tables * self.rowsPerTable)
+
+        tdSql.query("select * from (select * from st limit 10)")
+        tdSql.checkRows(10)
+
+        tdSql.query("select * from (select * from st order by ts desc limit 10)")
+        tdSql.checkRows(10)
+
+        # bug: https://jira.taosdata.com:18080/browse/TD-5043
+        tdSql.query("select * from (select * from st order by ts desc limit 10 offset 1000)")
+        tdSql.checkRows(0)
+
+        tdSql.query("select avg(value), sum(value) from st group by tbname")
+        tdSql.checkRows(self.tables)
+
+        tdSql.query("select * from (select avg(value), sum(value) from st group by tbname)")
+        tdSql.checkRows(self.tables)
+
+        tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5")
+        tdSql.checkRows(5)
+
+        tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5)")
+        tdSql.checkRows(5)
+
+        tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5 soffset 7")
+        tdSql.checkRows(3)
+
+        tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
+        tdSql.checkRows(3)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py
index ce8d05ae50aa61646b20e07e9484d00559e92b49..d61e8cf288c97fc869f19cba6bd3d181dc60797c 100644
--- a/tests/pytest/query/queryInterval.py
+++ b/tests/pytest/query/queryInterval.py
@@ -114,8 +114,7 @@ class TDTestCase:
tdSql.query("select first(ts),twa(c) from tb interval(14a)")
tdSql.checkRows(6)
- tdSql.query("select twa(c) from tb group by c")
- tdSql.checkRows(4)
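+ # twa() combined with group by on a normal column is expected to return an error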
+ tdSql.error("select twa(c) from tb group by c")
def stop(self):
diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py
index 720ae745cb9b3780f1ca7ffaf96d76eda5f307b1..742a3c2cd1907107c7cca54c7fb37862227b077f 100644
--- a/tests/pytest/query/queryPerformance.py
+++ b/tests/pytest/query/queryPerformance.py
@@ -45,28 +45,38 @@ class taosdemoQueryPerformace:
sql = "select count(*) from test.meters"
tableid = 1
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.meters"
tableid = 2
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select count(*) from test.meters where loc='beijing'"
tableid = 3
cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.meters where areaid=10"
tableid = 4
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from test.t10 interval(10s)"
tableid = 5
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select last_row(*) from meters"
tableid = 6
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select * from meters"
tableid = 7
cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
+
sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'"
tableid = 8
cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
-
+
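+ # query case 9: last(*) over all meters, complementing the last_row(*) case above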
+ sql = "select last(*) from meters"
+ tableid = 9
+ cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
cursor.close()
def query(self):
diff --git a/tests/pytest/tools/taosdemoAllTest/sub.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
similarity index 58%
rename from tests/pytest/tools/taosdemoAllTest/sub.json
rename to tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
index fe3c892a76bcc30678f60127d28ce79bf8682c18..93462d2c66cea62c21f0cc196652c94439f47bc0 100644
--- a/tests/pytest/tools/taosdemoAllTest/sub.json
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json
@@ -9,29 +9,33 @@
"confirm_parameter_prompt": "no",
"specified_table_query":
{
- "concurrent":1,
+ "concurrent":2,
"mode":"sync",
"interval":0,
- "restart":"yes",
+ "resubAfterConsume":1,
+ "endAfterConsume":1,
"keepProgress":"yes",
+ "restart":"no",
"sqls": [
{
- "sql": "select * from stb00_0 ;",
+ "sql": "select * from stb00_0",
"result": "./subscribe_res0.txt"
}]
},
"super_table_query":
{
"stblname": "stb0",
- "threads":1,
+ "threads":2,
"mode":"sync",
- "interval":10000,
- "restart":"yes",
+ "interval":1000,
+ "resubAfterConsume":1,
+ "endAfterConsume":1,
"keepProgress":"yes",
+ "restart":"no",
"sqls": [
{
- "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
- "result": "./subscribe_res1.txt"
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
}]
}
}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json
new file mode 100644
index 0000000000000000000000000000000000000000..4229f304e44fcda58a0e16b1e6445ebd339215d3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac221905655e53e9053336be8fcaaa8b1070639c
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":-1,
+ "endAfterConsume":-1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d937212c94bd002307695c7059d67ad0a4e68d3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":0,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":0,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json
new file mode 100644
index 0000000000000000000000000000000000000000..bf8927a58badfa606103d4b11d09f871ed64260f
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json
@@ -0,0 +1,41 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":1000,
+ "resubAfterConsume":-1,
+ "endAfterConsume":2,
+ "keepProgress":"no",
+ "restart":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
index 1401716da9095b44aa47e9ecb2e7131bc0a8b9ea..fe29409f296b310012773b9d78ca8735cfd52a13 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
@@ -78,7 +78,7 @@ class TDTestCase:
tdSql.checkData(0, 0, "%d" % suc_kill)
os.system("rm -rf querySystemInfo*")
os.system("rm -rf insert_res.txt")
- os.system("rm -rf insert_res.txt")
+ os.system("rm -rf query_res.txt")
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
index a92906fa730833108ad758d3fc53c954279abe38..62b6e7472aa779888a45603b06cf54a528923dec 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
+++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
@@ -13,7 +13,7 @@
"sqls":[
{
"sql": "select * from stb0",
- "result": ""
+ "result": "./query_res.txt"
}
]
}
diff --git a/tests/pytest/tools/taosdemoAllTest/convertResFile.py b/tests/pytest/tools/taosdemoAllTest/convertResFile.py
index 52bb8f40d0f0a5a55450ecb4927067f37f862499..5ed2fec13b1e0722937023d829e5e9b9fa1ad623 100644
--- a/tests/pytest/tools/taosdemoAllTest/convertResFile.py
+++ b/tests/pytest/tools/taosdemoAllTest/convertResFile.py
@@ -2,6 +2,14 @@ from datetime import datetime
import time
import os
+# class FileSeparaSpaceConvertcomma:
+# def __init__(self):
+# self.inputfile = ""
+# self.oputfile = ""
+# self.affectedRows = 0
+
+# def ConvertFile(self, inputfile,):
+
os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt")
with open('./new_query_res0.txt','r+') as f0:
contents = f0.readlines()
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
index f2dca662fddc5991a9dcdb8371dc0e4086868190..0ae3a7194f8320b3919f850e19861f7796d2a5cc 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json
@@ -8,7 +8,7 @@
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
- "confirm_parameter_prompt": "no",
+ "confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
index 55be0198916e3737d185deaa231885fbfa607c66..cd69badad154c6417d0e8d57f4d252354d40ad6b 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
@@ -71,7 +71,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -97,7 +97,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
@@ -123,7 +123,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 1000000,
+ "interlace_rows": 100,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b56830189623d344168918f239887c3359b2645
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1000,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json
new file mode 100644
index 0000000000000000000000000000000000000000..91234d5e48af891c4dfd0fdfd88121e123bf4edc
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 50000,
+ "num_of_records_per_req": 50000,
+ "max_sql_len": 1025000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 32767,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json
new file mode 100644
index 0000000000000000000000000000000000000000..d05e1c249f25c17c37e40626bf0d3c5a96e5fffe
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 100,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "rest",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
index 5cf8114472e00d5ebc90b5dc762f22f9698f7d76..88218b4989d5e01178142aa9acf2332b34718826 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json
@@ -11,8 +11,8 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
- "num_of_records_per_req": 100,
- "max_sql_len": 10240000000,
+ "num_of_records_per_req": 1000000,
+ "max_sql_len": 1024000000,
"databases": [{
"dbinfo": {
"name": "db1",
@@ -45,7 +45,7 @@
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
+ "interlace_rows": 10000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
index 703f755c31c7b325e34b93878e2e3175648834ef..077ced5d02c792b1c3344ea3e8b129038652b4b8 100644
--- a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
+++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
@@ -60,7 +60,7 @@ class TDTestCase:
tdSql.checkData(0, 0, 1000000)
os.system("rm -rf ./insert_res.txt")
- os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql")
+ os.system("rm -rf tools/taosdemoAllTest/moredemo-insert-offset.py.sql")
def stop(self):
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
new file mode 100644
index 0000000000000000000000000000000000000000..69557a784180acec3c6de059b9285df4d4b31456
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 0,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..9074ae8fd1049d2dbaedfff881feefd84583ca20
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": -1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json
new file mode 100644
index 0000000000000000000000000000000000000000..fd047dec9497c64f8b8f4300617fcc90563b67bc
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 0,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..96a54cfd09cda09f8a9ebed169527c13092c7d57
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times":3,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": -1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
similarity index 89%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json
rename to tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
index 79471be2044d3ea7c637b4b1e500cfcc8e6413a9..99138e36668971ee2e9aa0656b2ee76f262723e3 100644
--- a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json
+++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json
@@ -35,7 +35,7 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
- "childtable_count": 100,
+ "childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
@@ -54,13 +54,13 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
- "childtable_count": 100,
+ "childtable_count": 10,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
@@ -79,7 +79,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json
new file mode 100644
index 0000000000000000000000000000000000000000..747f7b3c7e9ebb5720cae98811e136ece74d47e2
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 5,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0 ,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json
index 67a1cf3eb39c045192b5d35f698e38506777cef2..7ebad5e2b2f5af687656c8eed041579d7de1e2c2 100644
--- a/tests/pytest/tools/taosdemoAllTest/queryQps.json
+++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json
@@ -9,8 +9,8 @@
"databases": "db",
"query_times": 1,
"specified_table_query": {
- "query_interval": 0,
- "concurrent": 1,
+ "query_interval": 10000,
+ "concurrent": 4,
"sqls": [
{
"sql": "select last_row(*) from stb00_0",
@@ -24,8 +24,8 @@
},
"super_table_query": {
"stblname": "stb1",
- "query_interval":0,
- "threads": 1,
+ "query_interval":20000,
+ "threads": 4,
"sqls": [
{
"sql": "select last_row(ts) from xxxx",
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json b/tests/pytest/tools/taosdemoAllTest/queryRestful.json
similarity index 100%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json
rename to tests/pytest/tools/taosdemoAllTest/queryRestful.json
diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json b/tests/pytest/tools/taosdemoAllTest/queryTaosc.json
similarity index 100%
rename from tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json
rename to tests/pytest/tools/taosdemoAllTest/queryTaosc.json
diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimes0.json b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json
new file mode 100644
index 0000000000000000000000000000000000000000..63a13587728fa797a65794994c04378edb87a0c5
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": 0,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json
new file mode 100644
index 0000000000000000000000000000000000000000..039f7e10603cd2d06608cb24a2cb72356bd50728
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json
@@ -0,0 +1,37 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": -1,
+ "specified_table_query": {
+ "query_interval": 0,
+ "concurrent": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(*) from stb00_0",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb00_99 ",
+ "result": "./query_res1.txt"
+
+ }]
+ },
+ "super_table_query": {
+ "stblname": "stb1",
+ "query_interval":0,
+ "threads": 1,
+ "sqls": [
+ {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "./query_res2.txt"
+ }
+ ]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subAsync.json b/tests/pytest/tools/taosdemoAllTest/subAsync.json
new file mode 100644
index 0000000000000000000000000000000000000000..67a3bf5aab85bc540b4b891039ba59960ff3f4b1
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subAsync.json
@@ -0,0 +1,45 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"async",
+ "interval":0,
+ "restart":"yes",
+ "keepProgress":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": "./subscribe_res1.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"async",
+ "interval":0,
+ "restart":"yes",
+ "keepProgress":"yes",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
index 7d14d0ad4b888fc099becb176e84af54bb769f50..1f9d794990dcbc0daaee2076f2ae6dfd1249b132 100644
--- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
+++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json
@@ -35,26 +35,26 @@
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
- "childtable_count": 1,
+ "childtable_count": 2,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 1,
+ "insert_rows": 10,
"childtable_limit": 0,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
- "disorder_range": 0,
+ "disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-02-25 10:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "BINARY", "len":50, "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..d5d0578f07526c18d541391597a3236c99f27544
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 200,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-02-25 10:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 20,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-02-25 10:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSync.json b/tests/pytest/tools/taosdemoAllTest/subSync.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa0b2cd7a4b454fd3332d72a521d244b5e567869
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSync.json
@@ -0,0 +1,45 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": "./subscribe_res1.txt"
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":10000,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json
new file mode 100644
index 0000000000000000000000000000000000000000..625e4792cfa113166e0ba5b0ef068bb2109bf027
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json
@@ -0,0 +1,49 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": ""
+ },
+ {
+ "sql": "select ts from stb00_1",
+ "result": ""
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":10000,
+ "restart":"no",
+ "keepProgress":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": ""
+ },
+ {
+ "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ",
+ "result": ""
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b2828822e4a35989ff9c0d69a469bb34a0e7a84
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json
@@ -0,0 +1,439 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res1.txt"
+ },
+ {
+ "sql": "select * from stb00_1",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_2",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_3",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_4",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_5",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_6",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_7",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_8",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_9",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_10 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_11 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_12 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_13 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_14 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_15 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_16 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_17 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_18 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_19 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_20 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_21 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_22 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_23 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_24 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_25 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_26 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_27 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_28 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_29 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_30 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_31 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_32 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_33 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_34 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_35 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_36 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_37 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_38 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_39 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_40 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_41 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_42 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_43 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_44 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_45 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_46 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_47 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_48 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_49 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_50 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_51 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_52 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_53 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_54 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_55 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_56 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_57 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_58 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_59 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_60",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_61",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_62",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_63",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_64",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_65",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_66",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_67",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_68",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_69",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_70 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_71 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_72 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_73 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_74 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_75 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_76 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_77 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_78 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_79 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_80 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_81 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_82 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_83 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_84 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_85 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_86 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_87 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_88 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_89 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_90 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_91 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_92 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_93 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_94 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_95 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_96 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_97 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_98 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json
new file mode 100644
index 0000000000000000000000000000000000000000..c45a9ea48a147ae96256de60ab6d1f9c579f1431
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json
@@ -0,0 +1,439 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"async",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from stb00_0",
+ "result": "./subscribe_res1.txt"
+ },
+ {
+ "sql": "select * from stb00_1",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_2",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_3",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_4",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_5",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_6",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_7",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_8",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_9",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_10 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_11 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_12 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_13 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_14 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_15 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_16 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_17 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_18 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_19 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_20 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_21 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_22 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_23 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_24 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_25 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_26 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_27 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_28 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_29 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_30 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_31 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_32 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_33 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_34 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_35 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_36 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_37 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_38 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_39 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_40 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_41 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_42 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_43 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_44 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_45 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_46 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_47 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_48 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_49 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_50 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_51 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_52 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_53 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_54 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_55 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_56 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_57 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_58 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_59 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_60",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_61",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_62",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_63",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_64",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_65",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_66",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_67",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_68",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_69",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_70 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_71 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_72 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_73 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_74 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_75 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_76 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_77 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_78 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_79 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_80 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_81 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_82 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_83 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_84 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_85 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_86 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_87 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_88 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_89 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_90 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_91 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_92 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_93 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_94 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_95 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_96 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_97 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_98 ",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ },
+ {
+ "sql": "select * from stb00_99 ",
+ "result": "./subscribe_res0.txt"
+
+ }]
+ },
+ "super_table_query":
+ {
+ "stblname": "stb0",
+ "threads":2,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ",
+ "result": "./subscribe_res2.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json
new file mode 100644
index 0000000000000000000000000000000000000000..3214d35bf04aa3b66b336734469539f42ea50c4c
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json
@@ -0,0 +1,426 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "super_table_query":
+ {
+ "stblname": "stb1",
+ "threads":4,
+ "mode":"sync",
+ "interval":0,
+ "restart":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json
new file mode 100644
index 0000000000000000000000000000000000000000..075ec9cf5dc5fc75da2da4cde8a9358799af7cb9
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json
@@ -0,0 +1,426 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "super_table_query":
+ {
+ "stblname": "stb1",
+ "threads":4,
+ "mode":"async",
+ "interval":0,
+ "restart":"no",
+ "resubAfterConsume":-1,
+ "endAfterConsume":1,
+ "keepProgress":"no",
+ "sqls": [
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res2.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ },
+ {
+ "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ",
+ "result": "./subscribe_res3.txt"
+ }]
+ }
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/sub_no_result.json b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json
new file mode 100644
index 0000000000000000000000000000000000000000..cdf7c2314ede28e9c3ccaa9d53864737ff3fac96
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json
@@ -0,0 +1,25 @@
+{
+ "filetype": "subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":1,
+ "mode":"sync",
+ "interval": 0,
+ "restart":"yes",
+ "keepProgress":"no",
+ "endAfterConsume": 1100000,
+ "sqls": [
+ {
+ "sql": "select * from st;",
+ "result": ""
+ }]
+ }
+ }
+
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py
new file mode 100644
index 0000000000000000000000000000000000000000..270eea17cb6c913719fb67c4b8f33065b0a0445d
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py
@@ -0,0 +1,82 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import _thread
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1601481600000
+ self.numberOfRecords = 1100000
+
+ def execCmdAndGetOutput(self, cmd):
+ r = os.popen(cmd)
+ text = r.read()
+ r.close()
+ return text
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ tdSql.prepare()
+ tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
+ tdSql.execute("create table t1 using st tags(0)")
+ currts = self.ts
+ finish = 0
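+        # build multi-row INSERT statements in batches, starting a new batch once the SQL gets within 16 KB of the 1 MiB (1048576-byte) length cap checked below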
+ while(finish < self.numberOfRecords):
+ sql = "insert into t1 values"
+ for i in range(finish, self.numberOfRecords):
+ sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
+ finish = i + 1
+ if (1048576 - len(sql)) < 16384:
+ break
+ tdSql.execute(sql)
+
+ binPath = buildPath+ "/build/bin/"
+
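+        # with an empty "result" path in sub_no_result.json the subscribed rows are expected on stdout; capture them via tee and check the log line count below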
+ os.system("%staosdemo -f tools/taosdemoAllTest/sub_no_result.json -g 2>&1 | tee sub_no_result.log" % binPath)
+ test_line = int(self.execCmdAndGetOutput("cat sub_no_result.log | wc -l"))
+ if(test_line < 1100024):
+            tdLog.exit("failed test subscribeNoResult: %d < expected(1100024)" % test_line)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
index 638a9c49b9b8cfe0864e4a158d3bb9ffe0b7985f..01e46eaaa00326c0da2aa2f61bb14a7349f3ca7f 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
@@ -64,6 +64,22 @@ class TDTestCase:
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 200000)
+ # restful connector insert data
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb01_1")
+ tdSql.checkData(0, 0, 20)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 200)
+
# insert: create mutiple tables per sql and insert one rows per sql .
os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
@@ -165,6 +181,10 @@ class TDTestCase:
tdSql.query("select count(*) from db.stb0")
tdSql.checkData(0, 0, 10000)
tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath)
+ tdSql.query("select count(*) from db.stb0")
+ tdSql.checkRows(0)
+ tdSql.execute("drop database if exists db")
os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("show stables like 'stb0%' ")
@@ -201,6 +221,12 @@ class TDTestCase:
tdSql.checkData(0, 0, "2019-10-01 00:00:00")
tdSql.query("select last(ts) from blf.p_0_topics_6 ")
tdSql.checkData(0, 0, "2020-09-29 23:59:00")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 5000000)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 5000000)
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
index 643cad942c6586486640ba125d520b46c93e3465..6021c9136ad235f3e9d07bb4f6654fdac54989e5 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
@@ -19,6 +19,9 @@ from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
+import ast
+# from assertpy import assert_that
+import subprocess
class TDTestCase:
def init(self, conn, logSql):
@@ -40,85 +43,145 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
-
+
+    # Read the taosc query result file line by line and assert the value in the first column of each row.
+ def assertfileDataTaosc(self,filename,expectResult):
+ self.filename = filename
+ self.expectResult = expectResult
+ with open("%s" % filename, 'r+') as f1:
+ for line in f1.readlines():
+ queryResult = line.strip().split()[0]
+ self.assertCheck(filename,queryResult,expectResult)
+
+    # Read the key content from the restful query result file; for now it breaks out of the loop at the first "data" key, so only a single value is returned. Handling multiple result files can be added later.
+ def getfileDataRestful(self,filename):
+ self.filename = filename
+ with open("%s" % filename, 'r+') as f1:
+ for line in f1.readlines():
+ contents = line.strip()
+ if contents.find("data") != -1:
+ contentsDict = ast.literal_eval(contents) # 字符串转换为字典
+ queryResult = contentsDict['data'][0][0]
+ break
+ return queryResult
+
+    # get the number of taosc queries (result file line count)
+ def queryTimesTaosc(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+    # get the number of restful queries ("200 OK" line count)
+ def queryTimesRestful(self,filename):
+ self.filename = filename
+ command = 'cat %s |grep "200 OK" |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+    # Assert whether the result is correct: raise with the mismatch details if not, otherwise pass.
+ def assertCheck(self,filename,queryResult,expectResult):
+ self.filename = filename
+ self.queryResult = queryResult
+ self.expectResult = expectResult
+ args0 = (filename, queryResult, expectResult)
+ assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
+ binPath = buildPath+ "/build/bin/"
+
+ # delete useless files
+ os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./all_query*")
+
+ # taosc query: query specified table and query super table
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" % binPath)
+ os.system("cat query_res0.txt* > all_query_res0_taosc.txt")
+ os.system("cat query_res1.txt* > all_query_res1_taosc.txt")
+ os.system("cat query_res2.txt* > all_query_res2_taosc.txt")
+
+ # correct Times testcases
+ queryTimes0Taosc = self.queryTimesTaosc("all_query_res0_taosc.txt")
+ self.assertCheck("all_query_res0_taosc.txt",queryTimes0Taosc,6)
+
+ queryTimes1Taosc = self.queryTimesTaosc("all_query_res1_taosc.txt")
+ self.assertCheck("all_query_res1_taosc.txt",queryTimes1Taosc,6)
+
+ queryTimes2Taosc = self.queryTimesTaosc("all_query_res2_taosc.txt")
+ self.assertCheck("all_query_res2_taosc.txt",queryTimes2Taosc,20)
+
+ # correct data testcase
+ self.assertfileDataTaosc("all_query_res0_taosc.txt","1604160000099")
+ self.assertfileDataTaosc("all_query_res1_taosc.txt","100")
+ self.assertfileDataTaosc("all_query_res2_taosc.txt","1604160000199")
- # query: query specified table and query super table
- os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryTaosc.json" % binPath)
- os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- tdSql.execute("use db")
- tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- tdSql.execute("insert into result0 file './test_query_res0.txt'")
- tdSql.query("select ts from result0")
- tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- tdSql.query("select count(*) from result0")
- tdSql.checkData(0, 0, 1)
- with open('./all_query_res1.txt','r+') as f1:
- result1 = int(f1.readline())
- tdSql.query("select count(*) from stb00_1")
- tdSql.checkData(0, 0, "%d" % result1)
-
- with open('./all_query_res2.txt','r+') as f2:
- result2 = int(f2.readline())
- d2 = datetime.fromtimestamp(result2/1000)
- timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- tdSql.query("select last_row(ts) from stb1")
- tdSql.checkData(0, 0, "%s" % timest)
+ # delete useless files
+ os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./all_query*")
+
+
+ # use restful api to query
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" % binPath)
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryRestful.json" % binPath)
+ os.system("cat query_res0.txt* > all_query_res0_rest.txt")
+ os.system("cat query_res1.txt* > all_query_res1_rest.txt")
+ os.system("cat query_res2.txt* > all_query_res2_rest.txt")
- # # delete useless files
- # os.system("rm -rf ./insert_res.txt")
- # os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
- # os.system("rm -rf ./querySystemInfo*")
- # os.system("rm -rf ./query_res*")
- # os.system("rm -rf ./all_query*")
- # os.system("rm -rf ./test_query_res0.txt")
-
-
- # # use restful api to query
- # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryRestful.json" % binPath)
- # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- # # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- # tdSql.execute("use db")
- # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- # os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- # tdSql.execute("insert into result0 file './test_query_res0.txt'")
- # tdSql.query("select ts from result0")
- # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- # tdSql.query("select count(*) from result0")
- # tdSql.checkData(0, 0, 1)
- # with open('./all_query_res1.txt','r+') as f1:
- # result1 = int(f1.readline())
- # tdSql.query("select count(*) from stb00_1")
- # tdSql.checkData(0, 0, "%d" % result1)
-
- # with open('./all_query_res2.txt','r+') as f2:
- # result2 = int(f2.readline())
- # d2 = datetime.fromtimestamp(result2/1000)
- # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- # tdSql.query("select last_row(ts) from stb1")
- # tdSql.checkData(0, 0, "%s" % timest)
+ # correct Times testcases
+ queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt")
+ self.assertCheck("all_query_res0_rest.txt",queryTimes0Restful,6)
+
+ queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt")
+ self.assertCheck("all_query_res1_rest.txt",queryTimes1Restful,6)
+ queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt")
+ self.assertCheck("all_query_res2_rest.txt",queryTimes2Restful,4)
+
+ # correct data testcase
+ data0 = self.getfileDataRestful("all_query_res0_rest.txt")
+ self.assertCheck('all_query_res0_rest.txt',data0,"2020-11-01 00:00:00.009")
+
+ data1 = self.getfileDataRestful("all_query_res1_rest.txt")
+ self.assertCheck('all_query_res1_rest.txt',data1,10)
+ data2 = self.getfileDataRestful("all_query_res2_rest.txt")
+ self.assertCheck('all_query_res2_rest.txt',data2,"2020-11-01 00:00:00.004")
+
# query times less than or equal to 100
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath)
- # query result print QPS
+ #query result print QPS
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath)
+
+ # use illegal or out of range parameters query json file
+ os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
+ exceptcode = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" % binPath)
+ assert exceptcode != 0
+
+ exceptcode0 = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" % binPath)
+ assert exceptcode0 != 0
+
+ exceptcode1 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" % binPath)
+ assert exceptcode1 != 0
+ exceptcode2 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" % binPath)
+ assert exceptcode2 != 0
+
+ exceptcode3 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" % binPath)
+ assert exceptcode3 != 0
+
+ exceptcode4 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" % binPath)
+ assert exceptcode4 != 0
# delete useless files
os.system("rm -rf ./insert_res.txt")
@@ -127,6 +190,8 @@ class TDTestCase:
os.system("rm -rf ./query_res*")
os.system("rm -rf ./all_query*")
os.system("rm -rf ./test_query_res0.txt")
+
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
index 1275b6a8b5d9345147ad36351d4269f0968fff5d..3e967581a4491da4108b981ccd83949751406b82 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py
@@ -19,6 +19,8 @@ from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
+import subprocess
+
class TDTestCase:
def init(self, conn, logSql):
@@ -40,7 +42,22 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
-
+
+ # get the number of subscriptions
+ def subTimes(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+ # assert results
+ def assertCheck(self,filename,subResult,expectResult):
+ self.filename = filename
+ self.subResult = subResult
+ self.expectResult = expectResult
+ args0 = (filename, subResult, expectResult)
+ assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
@@ -48,48 +65,136 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
+
+ # clear env
+ os.system("ps -ef |grep 'taosdemoAllTest/subSync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ os.system("ps -ef |grep 'taosdemoAllTest/subSyncKeepStart.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ sleep(1)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe_res*")
+ sleep(2)
+ # subscribe: sync
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/subSync.json &" % binPath)
+ query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subSync.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+
+        # insert extra data
+ tdSql.execute("use db")
+ tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)")
+ tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)")
+ sleep(5)
+
+ # merge result files
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
- # query: query specified table and query super table
- # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/sub.json" % binPath)
- # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
- # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
- # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
- # tdSql.execute("use db")
- # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
- # os.system("python3 tools/taosdemoAllTest/convertResFile.py")
- # tdSql.execute("insert into result0 file './test_query_res0.txt'")
- # tdSql.query("select ts from result0")
- # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
- # tdSql.query("select count(*) from result0")
- # tdSql.checkData(0, 0, 1)
- # with open('./all_query_res1.txt','r+') as f1:
- # result1 = int(f1.readline())
- # tdSql.query("select count(*) from stb00_1")
- # tdSql.checkData(0, 0, "%d" % result1)
-
- # with open('./all_query_res2.txt','r+') as f2:
- # result2 = int(f2.readline())
- # d2 = datetime.fromtimestamp(result2/1000)
- # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
- # tdSql.query("select last_row(ts) from stb1")
- # tdSql.checkData(0, 0, "%s" % timest)
+ # correct subscribeTimes testcase
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22)
+
+ subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24)
+
+ subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21)
+ subTimes3 = self.subTimes("all_subscribe_res3.txt")
+ self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13)
- # # query times less than or equal to 100
- # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySpeciMutisql100.json" % binPath)
- # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySuperMutisql100.json" % binPath)
+
+ # correct data testcase
+ os.system("kill -9 %d" % query_pid)
+ sleep(3)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
+
+        # # sql number larger than 100
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0
+
+ # # result files is null
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath)
+ # # assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) != 0
+
+
+
+
+ # resubAfterConsume= -1 endAfter=-1 ;
+ os.system('kill -9 `ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'` ')
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json & " % binPath)
+ sleep(2)
+ query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+ print("get sub1 process'pid")
+ subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1])
+ assert 0==subres0Number1 , "subres0Number1 error"
+ assert 0==subres2Number1 , "subres2Number1 error"
+ tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)")
+ sleep(4)
+        subres2Number2 = int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1])
+        subres0Number2 = int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ assert 0!=subres2Number2 , "subres2Number2 error"
+ assert 0!=subres0Number2 , "subres0Number2 error"
+ os.system("kill -9 %d" % query_pid1)
+ os.system("rm -rf ./subscribe_res*")
+
+ # # resubAfterConsume= -1 endAfter=0 ;
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ # os.system('kill -9 `ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'` ')
+ # os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json & " % binPath)
+ # sleep(2)
+ # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+ # print("get sub2 process'pid")
+ # subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1])
+ # assert 0==subres0Number1 , "subres0Number1 error"
+ # assert 0==subres2Number1 , "subres2Number1 error"
+ # tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)")
+ # sleep(4)
+ # subres2Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # subres0Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1])
+ # assert 0!=subres2Number2 , "subres2Number2 error"
+ # assert 0!=subres0Number2 , "subres0Number2 error"
+ # os.system("kill -9 %d" % query_pid1)
+ # os.system("rm -rf ./subscribe_res*")
+ # # # merge result files
+ # os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ # os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ # os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ # # os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
+ # sleep(3)
+
+ # # correct subscribeTimes testcase
+ # subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ # self.assertCheck("all_subscribe_res0.txt",subTimes0 ,3960)
+
+ # subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ # self.assertCheck("all_subscribe_res1.txt",subTimes1 ,40)
+
+ # subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ # self.assertCheck("all_subscribe_res2.txt",subTimes2 ,1900)
+
+
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath)
+ # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath)
+
+
+
# delete useless files
- # os.system("rm -rf ./insert_res.txt")
- # os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
- # os.system("rm -rf ./querySystemInfo*")
- # os.system("rm -rf ./query_res*")
- # os.system("rm -rf ./all_query*")
- # os.system("rm -rf ./test_query_res0.txt")
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2aa01e8703d9703d647507736130de2dd582bfb
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py
@@ -0,0 +1,124 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import time
+from datetime import datetime
+import subprocess
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ # get the number of subscription records (line count of the result file)
+ def subTimes(self,filename):
+ self.filename = filename
+ command = 'cat %s | wc -l' % filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+ def assertCheck(self,filename,queryResult,expectResult):
+ self.filename = filename
+ self.queryResult = queryResult
+ self.expectResult = expectResult
+ args0 = (filename, queryResult, expectResult)
+ assert queryResult == expectResult, "Query file: %s, result: %s != expect: %s" % args0
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # clear env
+ os.system("ps -ef |grep 'taosdemoAllTest/subAsync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ sleep(1)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe_res*")
+
+ # subscribe: resultfile
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath)
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/subAsync.json &" % binPath)
+ query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subAsync.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+
+ # insert extra data
+ tdSql.execute("use db")
+ tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)")
+ tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)")
+ sleep(5)
+
+ # merge result files
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt")
+
+ # correct subscribeTimes testcase
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22)
+
+ subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24)
+
+ subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21)
+
+ subTimes3 = self.subTimes("all_subscribe_res3.txt")
+ self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13)
+
+ # correct data testcase
+
+ os.system("kill -9 %d" % query_pid)
+
+ # the number of sqls in one subscription must be less than or equal to 100; the configs below violate this, so a non-zero exit is expected
+ os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath)
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0
+ assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0
+
+ # delete useless files
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py
index a45393e22284d675584c5dddd71fc507bcb2563f..c4b125969a7aae7735f29fe9429d029673588d21 100644
--- a/tests/pytest/tools/taosdemoPerformance.py
+++ b/tests/pytest/tools/taosdemoPerformance.py
@@ -17,10 +17,11 @@ import argparse
import os.path
import json
+
class taosdemoPerformace:
def __init__(self, commitID, dbName):
self.commitID = commitID
- self.dbName = dbName
+ self.dbName = dbName
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@@ -30,8 +31,8 @@ class taosdemoPerformace:
self.user,
self.password,
self.config)
- self.insertDB = "insertDB";
-
+ self.insertDB = "insertDB"
+
def generateJson(self):
db = {
"name": "%s" % self.insertDB,
@@ -41,7 +42,7 @@ class taosdemoPerformace:
stb = {
"name": "meters",
- "child_table_exists":"no",
+ "child_table_exists": "no",
"childtable_count": 10000,
"childtable_prefix": "stb_",
"auto_create_table": "no",
@@ -57,12 +58,12 @@ class taosdemoPerformace:
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
- "tags_file": "",
+ "tags_file": "",
"columns": [
{"type": "INT", "count": 4}
- ],
+ ],
"tags": [
- {"type": "INT", "count":1},
+ {"type": "INT", "count": 1},
{"type": "BINARY", "len": 16}
]
}
@@ -88,7 +89,7 @@ class taosdemoPerformace:
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 30000,
- "databases": [db]
+ "databases": [db]
}
insert_json_file = f"/tmp/insert.json"
@@ -103,24 +104,56 @@ class taosdemoPerformace:
cmd.close()
return output
- def insertData(self):
- os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
- self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
- self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
- self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
+ def getBuildPath(self):
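+ # walk the project tree to locate the build/bin directory that contains the taosdemo binary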
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdemo" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def insertData(self):
+ tdSql.prepare()
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdemo not found!")
+ else:
+ tdLog.info("taosdemo found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system(
+ "%staosdemo -f %s > taosdemoperf.txt 2>&1" %
+ (binPath, self.generateJson()))
+ self.createTableTime = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
+ self.insertRecordsTime = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
+ self.recordsPerSecond = self.getCMDOutput(
+ "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
self.commitID = self.getCMDOutput("git rev-parse --short HEAD")
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $4}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $4}'")
self.avgDelay = delay[:-4]
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $6}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $6}'")
self.maxDelay = delay[:-4]
- delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $8}'")
+ delay = self.getCMDOutput(
+ "grep 'delay' taosdemoperf.txt | awk '{print $8}'")
self.minDelay = delay[:-3]
os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt")
def createTablesAndStoreData(self):
cursor = self.conn.cursor()
-
+
cursor.execute("create database if not exists %s" % self.dbName)
cursor.execute("use %s" % self.dbName)
cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float)")
@@ -130,13 +163,21 @@ class taosdemoPerformace:
print("records per second: %f" % float(self.recordsPerSecond))
print("avg delay: %f" % float(self.avgDelay))
print("max delay: %f" % float(self.maxDelay))
- print("min delay: %f" % float(self.minDelay))
- cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" %
- (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay)))
+ print("min delay: %f" % float(self.minDelay))
+ cursor.execute(
+ "insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" %
+ (float(
+ self.createTableTime), float(
+ self.insertRecordsTime), float(
+ self.recordsPerSecond), self.commitID, float(
+ self.avgDelay), float(
+ self.maxDelay), float(
+ self.minDelay)))
cursor.execute("drop database if exists %s" % self.insertDB)
cursor.close()
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -155,6 +196,6 @@ if __name__ == '__main__':
args = parser.parse_args()
- perftest = taosdemoPerformace(args.commit_id, args.database_name)
+ perftest = taosdemoPerformace(args.commit_id, args.database_name)
perftest.insertData()
perftest.createTablesAndStoreData()
diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py
index ff5921be604f9fe911f1aa8b84efe230baf20e07..fec69bed648bdfac556e4652a0c45d287b8b739c 100644
--- a/tests/pytest/tools/taosdemoTest.py
+++ b/tests/pytest/tools/taosdemoTest.py
@@ -36,7 +36,7 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if ("taosdemo" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
@@ -47,9 +47,9 @@ class TDTestCase:
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
- tdLog.exit("taosd not found!")
+ tdLog.exit("taosdemo not found!")
else:
- tdLog.info("taosd found in %s" % buildPath)
+ tdLog.info("taosdemo found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -y -t %d -n %d" %
(binPath, self.numberOfTables, self.numberOfRecords))
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 8746f4ecdff32e740467d7caf0c5808dd91a72d5..beac22a2af93fea50df6580c9636fe6b9de8ed42 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -23,44 +23,110 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
self.ts = 1538548685000
self.numberOfTables = 10000
self.numberOfRecords = 100
-
+
+ def checkCommunity(self):
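+ # returns False when this test file lives under a "community" checkout, True otherwise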
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ return False
+ else:
+ return True
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdump" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
def run(self):
- tdSql.prepare()
+ if not os.path.exists("./taosdumptest/tmp1"):
+ os.makedirs("./taosdumptest/tmp1")
+ else:
+ print("目录存在")
- tdSql.execute("create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
- tdSql.execute("create table t1 using st tags(1, 'beijing')")
+ if not os.path.exists("./taosdumptest/tmp2"):
+ os.makedirs("./taosdumptest/tmp2")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
+ tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.execute("create table t1 using st tags(1, 'beijing')")
sql = "insert into t1 values"
currts = self.ts
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
-
- tdSql.execute("create table t2 using st tags(2, 'shanghai')")
+ tdSql.execute("create table t2 using st tags(2, 'shanghai')")
sql = "insert into t2 values"
currts = self.ts
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
-
- os.system("rm /tmp/*.sql")
- os.system("taosdump --databases db -o /tmp")
-
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system("rm ./taosdumptest/tmp1/*.sql")
+ os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath)
+ os.system("%staosdump --databases db1 -o ./taosdumptest/tmp2" % binPath)
+
tdSql.execute("drop database db")
+ tdSql.execute("drop database db1")
tdSql.query("show databases")
tdSql.checkRows(0)
-
- os.system("taosdump -i /tmp")
- tdSql.query("show databases")
- tdSql.checkRows(1)
- tdSql.checkData(0, 0, 'db')
-
+ os.system("%staosdump -i ./taosdumptest/tmp1" % binPath)
+ os.system("%staosdump -i ./taosdumptest/tmp2" % binPath)
+
tdSql.execute("use db")
+ tdSql.query("show databases")
+ tdSql.checkRows(2)
+ dbresult = tdSql.queryResult
+ # show databases columns: 6 -- days, 7 -- keep0,keep1,keep, 9 -- blocks
+
+ isCommunity = self.checkCommunity()
+
+ print("iscommunity: %d" % isCommunity)
+ for i in range(len(dbresult)):
+ if dbresult[i][0] == 'db':
+ print(dbresult[i])
+ print(type(dbresult[i][6]))
+ print(type(dbresult[i][7]))
+ print(type(dbresult[i][9]))
+ assert dbresult[i][6] == 11
+ if isCommunity:
+ assert dbresult[i][7] == "3649"
+ else:
+ assert dbresult[i][7] == "3649,3649,3649"
+ assert dbresult[i][9] == 8
+ if dbresult[i][0] == 'db1':
+ assert dbresult[i][6] == 12
+ if isCommunity:
+ assert dbresult[i][7] == "3640"
+ else:
+ assert dbresult[i][7] == "3640,3640,3640"
+ assert dbresult[i][9] == 7
+
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'st')
@@ -82,10 +148,42 @@ class TDTestCase:
tdSql.checkData(i, 1, i)
tdSql.checkData(i, 2, "nchar%d" % i)
+ # drop all databases; boundary value testing:
+ # length(databasename) <= 32; length(tablename) <= 192
+ tdSql.execute("drop database db")
+ tdSql.execute("drop database db1")
+ os.system("rm -rf ./taosdumptest/tmp1")
+ os.system("rm -rf ./taosdumptest/tmp2")
+ os.makedirs("./taosdumptest/tmp1")
+ tdSql.execute("create database db12312313231231321312312312_323")
+ tdSql.error("create database db12312313231231321312312312_3231")
+ tdSql.execute("use db12312313231231321312312312_323")
+ tdSql.execute("create stable st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.error("create stable st_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.execute(
+ "create stable st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
+ tdSql.error("create stable st1(ts timestamp, c1 int, col2_012345678901234567890123456789012345678901234567890123456789 nchar(10)) tags(t1 int, t2 binary(10))")
+
+ tdSql.execute("select * from db12312313231231321312312312_323.st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9")
+ tdSql.error("create table t0_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9 using st tags(1, 'beijing')")
+ tdSql.query("show stables")
+ tdSql.checkRows(2)
+ os.system(
+ "%staosdump --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" % binPath)
+ tdSql.execute("drop database db12312313231231321312312312_323")
+ os.system("%staosdump -i ./taosdumptest/tmp1" % binPath)
+ tdSql.execute("use db12312313231231321312312312_323")
+ tdSql.query("show stables")
+ tdSql.checkRows(2)
+ os.system("rm -rf ./taosdumptest/tmp1")
+ os.system("rm -rf ./taosdumptest/tmp2")
+ os.system("rm -rf ./dump_result.txt")
+ os.system("rm -rf ./db.csv")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index 51a73555a805ea503bdf560cbc3773e85b6d35ce..bed0564139e20fb6c562a7258af0cbd5b542069b 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -23,11 +23,27 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
self.ts = 1601481600000
self.numberOfTables = 1
self.numberOfRecords = 15000
-
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdump" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
def run(self):
tdSql.prepare()
@@ -41,22 +57,31 @@ class TDTestCase:
sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
finish = i + 1
if (1048576 - len(sql)) < 16384:
- break
+ break
tdSql.execute(sql)
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
os.system("rm /tmp/*.sql")
- os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576")
-
+ os.system(
+ "%staosdump --databases db -o /tmp -B 32766 -L 1048576" %
+ binPath)
+
tdSql.execute("drop database db")
tdSql.query("show databases")
tdSql.checkRows(0)
-
- os.system("taosdump -i /tmp")
+
+ os.system("%staosdump -i /tmp" % binPath)
tdSql.query("show databases")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'db')
-
+
tdSql.execute("use db")
tdSql.query("show stables")
tdSql.checkRows(1)
@@ -71,4 +96,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 937265753323a41e51ddf9a3be0061c66b6c586a..c820dd3bf56fb5268092dbdec2d37d7cfa0ca0c5 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -131,6 +131,7 @@ run general/parser/join.sim
run general/parser/join_multivnode.sim
run general/parser/select_with_tags.sim
run general/parser/groupby.sim
+run general/parser/top_groupby.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index e47af5588e03fe09c47ede387e154c02bbcaa6e9..1fe19714bbd516c2e8938ce1290f04f8d2053839 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -692,6 +692,7 @@ if $data31 != 4 then
return -1
endi
+sql_error select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,c;
sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
if $rows != 40 then
return -1
diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim
index a8d2102befeabf70d70e3a361ad5e933f021ce4a..e063333853e04faf1a7f4988b6dd1f11207aee5d 100644
--- a/tests/script/general/parser/having.sim
+++ b/tests/script/general/parser/having.sim
@@ -1835,5 +1835,8 @@ if $data04 != 1 then
endi
sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
+sql_error select count(*) from tb1 group by f1 having last(*) > 0;
+
+print bug for select count(*) k from tb1 group by f1 having k > 0;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim
index a38db3fe44e8857ba646128a856371468d723b2b..0fe5448869a5720a62550a88981114e737e4965b 100644
--- a/tests/script/general/parser/having_child.sim
+++ b/tests/script/general/parser/having_child.sim
@@ -306,41 +306,11 @@ endi
sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having twa(f1) > 0;
-sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3;
-if $rows != 1 then
- return -1
-endi
-if $data00 != 4.000000000 then
- return -1
-endi
-if $data01 != 2 then
- return -1
-endi
-if $data02 != 8 then
- return -1
-endi
-if $data03 != 4.000000000 then
- return -1
-endi
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3;
sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having sum(f1) > 0;
-sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
-if $rows != 1 then
- return -1
-endi
-if $data00 != 2.000000000 then
- return -1
-endi
-if $data01 != 2 then
- return -1
-endi
-if $data02 != 4 then
- return -1
-endi
-if $data03 != 2.000000000 then
- return -1
-endi
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
if $rows != 4 then
diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim
index fea322ec16b0d67af41b2a727cffa409cef8b37a..7cdd04e2ccdb93c7e1f84298101d74e7c3af061f 100644
--- a/tests/script/general/parser/lastrow.sim
+++ b/tests/script/general/parser/lastrow.sim
@@ -82,5 +82,9 @@ endi
if $data01 != NULL then
return -1
endi
+sql select last_row(*) from (select f from lr_nested)
+if $rows != 1 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index b26d163ab5e6d6ac02c8cb955e25b06871b555c3..fe12972bf698aae9f7469ee5768852cb342bc6c3 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -424,4 +424,30 @@ if $data01 != 1 then
return -1
endi
+sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0
+
+print ===========>td-4805
+sql_error select tbname, i from (select * from nest_tb0) group by i;
+
+sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1;
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != 100 then
+ return -1
+endi
+
+if $data01 != 0 then
+ return -1
+endi
+
+if $data10 != 100 then
+ return -1
+endi
+
+if $data11 != 1 then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/top_groupby.sim b/tests/script/general/parser/top_groupby.sim
new file mode 100644
index 0000000000000000000000000000000000000000..5709f4d1d7210761292d59aefa8984dad2fd2f23
--- /dev/null
+++ b/tests/script/general/parser/top_groupby.sim
@@ -0,0 +1,52 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))
+
+sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");
+
+sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1")
+sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2")
+sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3")
+sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4")
+
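+# f1 values 1,2,3 each appear once and 4 appears five times, so top/bottom(f1, 2) grouped by f1 returns 5 rows and top/bottom(f1, 100) returns 8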
+sql select top(f1, 2) from tb1 group by f1;
+
+if $rows != 5 then
+ return -1
+endi
+
+sql select bottom(f1, 2) from tb1 group by f1;
+
+if $rows != 5 then
+ return -1
+endi
+
+sql select top(f1, 100) from tb1 group by f1;
+
+if $rows != 8 then
+ return -1
+endi
+
+sql select bottom(f1, 100) from tb1 group by f1;
+
+if $rows != 8 then
+ return -1
+endi
+
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
index 1d67a7f86b2aef94a90b74d3d0e6a07dc0cf3ef2..ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb 100644
--- a/tests/script/unique/http/admin.sim
+++ b/tests/script/unique/http/admin.sim
@@ -34,6 +34,7 @@ print =============== step1 - login
system_content curl 127.0.0.1:7111/admin/
print 1-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
+ print actual: $system_content
return -1
endi
@@ -149,6 +150,8 @@ endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all
print curl 127.0.0.1:7111/admin/all -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
+ print actual: $system_content
+ print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}
return -1
endi