diff --git a/cmake/define.inc b/cmake/define.inc index 6f49630d5c25d0f1519f3e6c606fe28a026add2d..e825dce024f388e0a376ad0713abca7dd94c5e2a 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -69,7 +69,6 @@ IF (TD_LINUX_32) ENDIF () IF (TD_ARM_64) - ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_ARM_64) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-DUSE_LIBICONV) @@ -86,17 +85,19 @@ IF (TD_ARM_32) ENDIF () IF (TD_MIPS_64) - ADD_DEFINITIONS(-D_TD_MIPS_64_) + ADD_DEFINITIONS(-D_TD_MIPS_) + ADD_DEFINITIONS(-D_TD_MIPS_64) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_MIPS_32) - ADD_DEFINITIONS(-D_TD_MIPS_32_) + ADD_DEFINITIONS(-D_TD_MIPS_) + ADD_DEFINITIONS(-D_TD_MIPS_32) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_APLHINE) diff --git a/cmake/install.inc b/cmake/install.inc index cb571b1620618b35207931b356f52ac2797e901d..9e325531d51c7ebb315fb01ccbd0cbb06ffcd5d5 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.26-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index fe4c017c716ad3849356dbfae4a110ea60b9f25a..8035b31cc7a9a391becaacbff22d708177536ce2 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.19.0") + SET(TD_VER_NUMBER "2.0.20.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/importSampleData/README.md b/importSampleData/README.md index ee3a6e073c18b618af49a9c0b6d2d6d07718f00f..56c5be0da422aadc5e05fe000ab83c312d29b6c8 100644 --- a/importSampleData/README.md +++ b/importSampleData/README.md @@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go 是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。 -* -savetb int +* -savetb string 当 save 为 1 时保存统计信息的表名, 默认 statistic。 diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go index 61de6e740c1f0cf71c8c94e384dcd68fc58fbc60..5fee49734d058082b662ad0e173b22cf78acac43 100644 --- a/importSampleData/app/main.go +++ b/importSampleData/app/main.go @@ -7,7 +7,6 @@ import ( "encoding/json" "flag" "fmt" - "hash/crc32" "io" "log" "os" @@ -17,47 +16,55 @@ import ( "sync" "time" - dataimport "github.com/taosdata/TDengine/importSampleData/import" + dataImport "github.com/taosdata/TDengine/importSampleData/import" _ "github.com/taosdata/driver-go/taosSql" ) const ( - TIMESTAMP = "timestamp" - DATETIME = "datetime" - MILLISECOND = "millisecond" - DEFAULT_STARTTIME int64 = -1 - DEFAULT_INTERVAL int64 = 1 * 1000 - DEFAULT_DELAY int64 = -1 - DEFAULT_STATISTIC_TABLE = "statistic" - - JSON_FORMAT = "json" - CSV_FORMAT = "csv" - SUPERTABLE_PREFIX = "s_" - SUBTABLE_PREFIX = 
"t_" - - DRIVER_NAME = "taosSql" - STARTTIME_LAYOUT = "2006-01-02 15:04:05.000" - INSERT_PREFIX = "insert into " + // 主键类型必须为 timestamp + TIMESTAMP = "timestamp" + + // 样例数据中主键时间字段是 millisecond 还是 dateTime 格式 + DATETIME = "datetime" + MILLISECOND = "millisecond" + + DefaultStartTime int64 = -1 + DefaultInterval int64 = 1 * 1000 // 导入的记录时间间隔,该设置只会在指定 auto=1 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。 + DefaultDelay int64 = -1 // + + // 当 save 为 1 时保存统计信息的表名, 默认 statistic。 + DefaultStatisticTable = "statistic" + + // 样例数据文件格式,可以是 json 或 csv + JsonFormat = "json" + CsvFormat = "csv" + + SuperTablePrefix = "s_" // 超级表前缀 + SubTablePrefix = "t_" // 子表前缀 + + DriverName = "taosSql" + StartTimeLayout = "2006-01-02 15:04:05.000" + InsertPrefix = "insert into " ) var ( - cfg string - cases string - hnum int - vnum int - thread int - batch int - auto int - starttimestr string - interval int64 - host string - port int - user string - password string - dropdb int - db string - dbparam string + cfg string // 导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 config/cfg.toml + cases string // 需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 [usecase] 查看,可同时导入多个场景,中间使用逗号分隔,如:sensor_info,camera_detection,默认为 sensor_info + hnum int // 需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 t_0 数据,指定 hnum 为 2 时会根据原有表名创建 t、t_1 两张子表。默认为 100。 + vnum int // 需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次 + thread int // 执行导入数据的线程数目,默认为 10 + batch int // 执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录 + auto int // 是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0 + startTimeStr string // 导入的记录开始时间,格式为 "yyyy-MM-dd HH:mm:ss.SSS",不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空 + interval int64 // 导入的记录时间间隔,该设置只会在指定 auto=1 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000 + host string // 导入的 TDengine 服务器 IP,默认为 127.0.0.1 + port int // 导入的 TDengine 服务器端口,默认为 6030 + user string // 导入的 TDengine 用户名,默认为 root + password string // 导入的 TDengine 用户密码,默认为 taosdata + dropdb int // 导入数据之前是否删除数据库,1 是,0 否, 默认 0 + db string // 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd + dbparam string // 当指定的数据库不存在时,自动创建数据库时可选项配置参数,如 days 10 cache 16000 ablocks 4,默认为空 dataSourceName string startTime int64 @@ -72,10 +79,10 @@ var ( lastStaticTime time.Time lastTotalRows int64 timeTicker *time.Ticker - delay int64 // default 10 milliseconds - tick int64 - save int - saveTable string + delay int64 // 当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。 + tick int64 // 打印统计信息的时间间隔,默认 2000 ms。 + save int // 是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。 + saveTable string // 当 save 为 1 时保存统计信息的表名, 默认 statistic。 ) type superTableConfig struct { @@ -83,7 +90,7 @@ type superTableConfig struct { endTime int64 cycleTime int64 avgInterval int64 - config dataimport.CaseConfig + config dataImport.CaseConfig } type scaleTableInfo struct { @@ -92,14 +99,14 @@ type scaleTableInfo struct { insertRows int64 } -type tableRows struct { - tableName string // tableName - value string // values(...) -} +//type tableRows struct { +// tableName string // tableName +// value string // values(...) 
+//} type dataRows struct { rows []map[string]interface{} - config dataimport.CaseConfig + config dataImport.CaseConfig } func (rows dataRows) Len() int { @@ -107,9 +114,9 @@ func (rows dataRows) Len() int { } func (rows dataRows) Less(i, j int) bool { - itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp]) - jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp]) - return itime < jtime + iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp]) + jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp]) + return iTime < jTime } func (rows dataRows) Swap(i, j int) { @@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 { } func init() { - parseArg() //parse argument + parseArg() // parse argument if db == "" { - //db = "go" + // 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd db = fmt.Sprintf("test_%s", time.Now().Format("20060102")) } - if auto == 1 && len(starttimestr) == 0 { + if auto == 1 && len(startTimeStr) == 0 { log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ") } - if len(starttimestr) != 0 { - t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local) + if len(startTimeStr) != 0 { + t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local) if err != nil { - log.Fatalf("param startTime %s error, %s\n", starttimestr, err) + log.Fatalf("param startTime %s error, %s\n", startTimeStr, err) } startTime = t.UnixNano() / 1e6 // as millisecond } else { - startTime = DEFAULT_STARTTIME + startTime = DefaultStartTime } dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port) @@ -154,9 +161,9 @@ func init() { func main() { - importConfig := dataimport.LoadConfig(cfg) + importConfig := dataImport.LoadConfig(cfg) - var caseMinumInterval int64 = -1 + var caseMinInterval int64 = -1 for _, userCase := range strings.Split(cases, ",") { caseConfig, ok := importConfig.UserCases[userCase] @@ -168,7 +175,7 @@ func main() { checkUserCaseConfig(userCase, &caseConfig) - //read file as map array + // read file as map array fileRows := readFile(caseConfig) log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows)) @@ -177,31 +184,31 @@ func main() { continue } - _, exists := superTableConfigMap[caseConfig.Stname] + _, exists := superTableConfigMap[caseConfig.StName] if !exists { - superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig} + superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig} } else { - log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname) + log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName) } var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows) // set super table's startTime, cycleTime and avgInterval - superTableConfigMap[caseConfig.Stname].startTime = start - superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval - superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime + superTableConfigMap[caseConfig.StName].startTime = start + superTableConfigMap[caseConfig.StName].cycleTime = cycleTime + superTableConfigMap[caseConfig.StName].avgInterval = avgInterval - if caseMinumInterval == -1 || caseMinumInterval > avgInterval { - caseMinumInterval = avgInterval + if caseMinInterval == -1 || caseMinInterval > avgInterval { + caseMinInterval = avgInterval } - startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT) + startStr := time.Unix(0, 
start*int64(time.Millisecond)).Format(StartTimeLayout) log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime) } - if DEFAULT_DELAY == delay { + if DefaultDelay == delay { // default delay - delay = caseMinumInterval / 2 + delay = caseMinInterval / 2 if delay < 1 { delay = 1 } @@ -218,7 +225,7 @@ func main() { createSuperTable(superTableConfigMap) log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6) - //create sub table + // create sub table start = time.Now() createSubTable(subTableMap) log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6) @@ -278,7 +285,7 @@ func staticSpeed() { defer connection.Close() if save == 1 { - connection.Exec("use " + db) + _, _ = connection.Exec("use " + db) _, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)") if err != nil { log.Fatalf("create %s Table error: %s\n", saveTable, err) @@ -294,12 +301,12 @@ func staticSpeed() { total := getTotalRows(successRows) currentSuccessRows := total - lastTotalRows - speed := currentSuccessRows * 1e9 / int64(usedTime) + speed := currentSuccessRows * 1e9 / usedTime log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed) if save == 1 { insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed) - connection.Exec(insertSql) + _, _ = connection.Exec(insertSql) } lastStaticTime = currentTime @@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i } else { // use the sample data primary timestamp - sort.Sort(fileRows) // sort the file data by the primarykey + sort.Sort(fileRows) // sort the file data by the primaryKey minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp]) maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp]) start = minTime // default startTime use the minTime - if DEFAULT_STARTTIME != startTime { + // 设置了start时间的话 按照start来 + if DefaultStartTime != startTime { start = startTime } @@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i return } -func createStatisticTable() { - connection := getConnection() - defer connection.Close() - - _, err := connection.Exec("create table if not exist " + db + "." 
+ saveTable + "(ts timestamp, speed int)") - if err != nil { - log.Fatalf("createStatisticTable error: %s\n", err) - } -} - func createSubTable(subTableMaps map[string]*dataRows) { connection := getConnection() defer connection.Close() - connection.Exec("use " + db) + _, _ = connection.Exec("use " + db) createTablePrefix := "create table if not exists " + var buffer bytes.Buffer for subTableName := range subTableMaps { - superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname) - tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags + superTableName := getSuperTableName(subTableMaps[subTableName].config.StName) + firstRowValues := subTableMaps[subTableName].rows[0] // the first rows values as tags - buffers := bytes.Buffer{} - // create table t using supertTable tags(...); + // create table t using superTable tags(...); for i := 0; i < hnum; i++ { tableName := getScaleSubTableName(subTableName, i) @@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) { } scaleTableNames = append(scaleTableNames, tableName) - buffers.WriteString(createTablePrefix) - buffers.WriteString(tableName) - buffers.WriteString(" using ") - buffers.WriteString(superTableName) - buffers.WriteString(" tags(") + buffer.WriteString(createTablePrefix) + buffer.WriteString(tableName) + buffer.WriteString(" using ") + buffer.WriteString(superTableName) + buffer.WriteString(" tags(") for _, tag := range subTableMaps[subTableName].config.Tags { - tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)]) - buffers.WriteString("'" + tagValue + "'") - buffers.WriteString(",") + tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)]) + buffer.WriteString("'" + tagValue + "'") + buffer.WriteString(",") } - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(")") + buffer.Truncate(buffer.Len() - 1) + buffer.WriteString(")") - createTableSql := buffers.String() - buffers.Reset() + createTableSql := buffer.String() + buffer.Reset() //log.Printf("create table: %s\n", createTableSql) _, err := connection.Exec(createTableSql) @@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { if err != nil { log.Fatalf("drop database error: %s\n", err) } - log.Printf("dropDb: %s\n", dropDbSql) + log.Printf("dropdb: %s\n", dropDbSql) } createDbSql := "create database if not exists " + db + " " + dbparam @@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { } log.Printf("createDb: %s\n", createDbSql) - connection.Exec("use " + db) + _, _ = connection.Exec("use " + db) prefix := "create table if not exists " var buffer bytes.Buffer @@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { createSql := buffer.String() buffer.Reset() - //log.Printf("supertable: %s\n", createSql) + //log.Printf("superTable: %s\n", createSql) _, err = connection.Exec(createSql) if err != nil { log.Fatalf("create supertable error: %s\n", err) @@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { } -func getScaleSubTableName(subTableName string, hnum int) string { - if hnum == 0 { +func getScaleSubTableName(subTableName string, hNum int) string { + if hNum == 0 { return subTableName } - return fmt.Sprintf("%s_%d", subTableName, hnum) + return fmt.Sprintf("%s_%d", subTableName, hNum) } -func getSuperTableName(stname string) string { - return SUPERTABLE_PREFIX + stname +func getSuperTableName(stName string) string { + 
return SuperTablePrefix + stName } /** @@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 { row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime - subTableName := getSubTableName(tableValue, fileRows.config.Stname) + subTableName := getSubTableName(tableValue, fileRows.config.StName) value, ok := subTableMap[subTableName] if !ok { @@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int continue } - subTableName := getSubTableName(tableValue, fileRows.config.Stname) + subTableName := getSubTableName(tableValue, fileRows.config.StName) value, ok := currSubTableMap[subTableName] if !ok { @@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int } - var maxRows, tableRows int = 0, 0 + var maxRows, tableRows = 0, 0 for tableName := range currSubTableMap { tableRows = len(currSubTableMap[tableName].rows) subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap @@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int } func getSubTableName(subTableValue string, superTableName string) string { - return SUBTABLE_PREFIX + subTableValue + "_" + superTableName + return SubTablePrefix + subTableValue + "_" + superTableName } func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) { @@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i defer connection.Close() defer wg.Done() - connection.Exec("use " + db) // use db + _, _ = connection.Exec("use " + db) // use db log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end) num := 0 subTables := scaleTableNames[start:end] + var buffer bytes.Buffer for { var currSuccessRows int64 var appendRows int var lastTableName string - buffers := bytes.Buffer{} - buffers.WriteString(INSERT_PREFIX) + buffer.WriteString(InsertPrefix) for _, tableName := range subTables { subTableInfo := subTableMap[scaleTableMap[tableName].subTableName] subTableRows := int64(len(subTableInfo.rows)) - superTableConf := superTableConfigMap[subTableInfo.config.Stname] + superTableConf := superTableConfigMap[subTableInfo.config.StName] tableStartTime := superTableConf.startTime var tableEndTime int64 @@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i // append if lastTableName != tableName { - buffers.WriteString(tableName) - buffers.WriteString(" values") + buffer.WriteString(tableName) + buffer.WriteString(" values") } lastTableName = tableName - buffers.WriteString("(") - buffers.WriteString(fmt.Sprintf("%v", currentTime)) - buffers.WriteString(",") + buffer.WriteString("(") + buffer.WriteString(fmt.Sprintf("%v", currentTime)) + buffer.WriteString(",") - // fieldNum := len(subTableInfo.config.Fields) for _, field := range subTableInfo.config.Fields { - buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) - buffers.WriteString(",") - // if( i != fieldNum -1){ - - // } + buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) + buffer.WriteString(",") } - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(") ") + buffer.Truncate(buffer.Len() - 1) + buffer.WriteString(") ") appendRows++ insertRows++ if appendRows == batch { - // executebatch - insertSql := buffers.String() - connection.Exec("use " + db) + // executeBatch + insertSql := buffer.String() affectedRows 
:= executeBatchInsert(insertSql, connection) successRows[threadIndex] += affectedRows currSuccessRows += affectedRows - buffers.Reset() - buffers.WriteString(INSERT_PREFIX) + buffer.Reset() + buffer.WriteString(InsertPrefix) lastTableName = "" appendRows = 0 } @@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i // left := len(rows) if appendRows > 0 { - // executebatch - insertSql := buffers.String() - connection.Exec("use " + db) + // executeBatch + insertSql := buffer.String() affectedRows := executeBatchInsert(insertSql, connection) successRows[threadIndex] += affectedRows currSuccessRows += affectedRows - buffers.Reset() + buffer.Reset() } // log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6) @@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i } -func buildSql(rows []tableRows) string { - - var lastTableName string - - buffers := bytes.Buffer{} - - for i, row := range rows { - if i == 0 { - lastTableName = row.tableName - buffers.WriteString(INSERT_PREFIX) - buffers.WriteString(row.tableName) - buffers.WriteString(" values") - buffers.WriteString(row.value) - continue - } - - if lastTableName == row.tableName { - buffers.WriteString(row.value) - } else { - buffers.WriteString(" ") - buffers.WriteString(row.tableName) - buffers.WriteString(" values") - buffers.WriteString(row.value) - lastTableName = row.tableName - } - } - - inserSql := buffers.String() - return inserSql -} - -func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows { - - tableRows := tableRows{tableName: tableName} - - buffers := bytes.Buffer{} - - buffers.WriteString("(") - buffers.WriteString(fmt.Sprintf("%v", currentTime)) - buffers.WriteString(",") - - for _, field := range subTableInfo.config.Fields { - buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) - buffers.WriteString(",") - } - - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(")") - - insertSql := buffers.String() - tableRows.value = insertSql - - return tableRows -} - func executeBatchInsert(insertSql string, connection *sql.DB) int64 { - result, error := connection.Exec(insertSql) - if error != nil { - log.Printf("execute insertSql %s error, %s\n", insertSql, error) + result, err := connection.Exec(insertSql) + if err != nil { + log.Printf("execute insertSql %s error, %s\n", insertSql, err) return 0 } affected, _ := result.RowsAffected() @@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 { affected = 0 } return affected - // return 0 } func getFieldValue(fieldValue interface{}) string { @@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string { } func getConnection() *sql.DB { - db, err := sql.Open(DRIVER_NAME, dataSourceName) + db, err := sql.Open(DriverName, dataSourceName) if err != nil { panic(err) } @@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string { return fmt.Sprintf("%v", suffix) } -func hash(s string) int { - v := int(crc32.ChecksumIEEE([]byte(s))) - if v < 0 { - return -v - } - return v -} - -func readFile(config dataimport.CaseConfig) dataRows { +func readFile(config dataImport.CaseConfig) dataRows { fileFormat := strings.ToLower(config.Format) - if fileFormat == JSON_FORMAT { + if fileFormat == JsonFormat { return readJSONFile(config) - } else if fileFormat == CSV_FORMAT { + } else if fileFormat == 
CsvFormat { return readCSVFile(config) } @@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows { return dataRows{} } -func readCSVFile(config dataimport.CaseConfig) dataRows { +func readCSVFile(config dataImport.CaseConfig) dataRows { var rows dataRows f, err := os.Open(config.FilePath) if err != nil { @@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { line := strings.ToLower(string(lineBytes)) titles := strings.Split(line, config.Separator) if len(titles) < 3 { - // need suffix、 primarykey and at least one other field + // need suffix、 primaryKey and at least one other field log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath) return rows } @@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { } // if the primary key valid - primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap) + primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap) if primaryKeyValue == -1 { log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) continue @@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { return rows } -func readJSONFile(config dataimport.CaseConfig) dataRows { +func readJSONFile(config dataImport.CaseConfig) dataRows { var rows dataRows f, err := os.Open(config.FilePath) @@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows { continue } - primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line) + primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line) if primaryKeyValue == -1 { log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) continue @@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows { /** * get primary key as millisecond , otherwise return -1 */ -func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 { +func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 { if !existMapKeyAndNotEmpty(key, line) { return -1 } @@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool { return true } -func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) { +func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) { - if len(caseConfig.Stname) == 0 { + if len(caseConfig.StName) == 0 { log.Fatalf("the stname of case %s can't be empty\n", caseName) } - caseConfig.Stname = strings.ToLower(caseConfig.Stname) + caseConfig.StName = strings.ToLower(caseConfig.StName) if len(caseConfig.Tags) == 0 { log.Fatalf("the tags of case %s can't be empty\n", caseName) @@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) { } func parseArg() { - flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.") - flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. 
Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.") + flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.") + flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.") flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.") flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.") - flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.") + flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval(millisecond) to continue generating data when vnum set 0.") flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.") flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.") - flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.") + flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save set 1.") flag.IntVar(&thread, "thread", 10, "number of threads to import data.") flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.") - flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.") - flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.") - flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.") + flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.") + flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.") + flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.") flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.") flag.IntVar(&port, "port", 6030, "tdengine server port.") flag.StringVar(&user, "user", "root", "user name to login into the database.") flag.StringVar(&password, "password", "taosdata", "the import tdengine user password") - flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.") + flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 
1 is yes and 0 otherwise.") flag.StringVar(&db, "db", "", "name of the database to store data.") flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.") @@ -1066,7 +994,7 @@ func printArg() { fmt.Println("-thread:", thread) fmt.Println("-batch:", batch) fmt.Println("-auto:", auto) - fmt.Println("-start:", starttimestr) + fmt.Println("-start:", startTimeStr) fmt.Println("-interval:", interval) fmt.Println("-host:", host) fmt.Println("-port", port) diff --git a/importSampleData/data/sensor_info.csv b/importSampleData/data/sensor_info.csv index d049c8b00460cdcc2a1bd5b990ae6efa2aa63bd3..c5ff898118e59dcfc0eb24d03db7b326b5fb9342 100644 --- a/importSampleData/data/sensor_info.csv +++ b/importSampleData/data/sensor_info.csv @@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity 8, haerbing, yellow, 2, 1575129697000, 31, 16.321497 8, haerbing, yellow, 2, 1575129698000, 25, 15.864515 8, haerbing, yellow, 2, 1575129699000, 25, 16.492443 -9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889 -9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610 -9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319 -9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807 -9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642 -9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246 -9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774 -9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364 -9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084 -9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289 -9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973 -9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783 -9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101 -9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450 -9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142 -9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837 -9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730 -9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146 -9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187 -9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345 -9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215 -9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621 -9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735 -9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125 -9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485 -9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433 -9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050 -9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168 -9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681 -9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272 -9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976 -9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909 -9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740 -9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645 -9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036 -9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812 -9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939 -9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587 -9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381 -9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530 -9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318 -9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237 -9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297 -9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282 -9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654 -9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736 -9, sijiazhuang, blue, 0, 
1575129646000, 17, 16.067354 -9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465 -9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276 -9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892 -9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566 -9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849 -9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709 -9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651 -9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347 -9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095 -9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746 -9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939 -9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819 -9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181 -9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922 -9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530 -9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657 -9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854 -9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554 -9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130 -9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404 -9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847 -9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378 -9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507 -9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808 -9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065 -9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803 -9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544 -9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786 -9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420 -9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522 -9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531 -9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744 -9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456 -9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136 -9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523 -9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873 -9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768 -9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610 -9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299 -9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126 -9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302 -9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350 -9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790 -9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568 -9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267 -9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092 -9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021 -9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909 -9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524 -9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127 -9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012 -9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551 -9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868 \ No newline at end of file +9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889 +9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610 +9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319 +9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807 +9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642 +9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246 +9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774 +9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364 +9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084 +9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289 +9, shijiazhuang, 
blue, 0, 1575129610000, 21, 15.313973 +9, shijiazhuang, blue, 0, 1575129611000, 27, 18.621783 +9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101 +9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450 +9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142 +9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837 +9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730 +9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146 +9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187 +9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345 +9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215 +9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621 +9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735 +9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125 +9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485 +9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433 +9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050 +9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168 +9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681 +9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272 +9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976 +9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909 +9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740 +9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645 +9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036 +9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812 +9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939 +9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587 +9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381 +9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530 +9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318 +9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237 +9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297 +9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282 +9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654 +9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736 +9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354 +9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465 +9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276 +9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892 +9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566 +9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849 +9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709 +9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651 +9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347 +9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095 +9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746 +9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939 +9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819 +9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181 +9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922 +9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530 +9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657 +9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854 +9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554 +9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130 +9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404 +9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847 +9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378 +9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507 +9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808 +9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065 +9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803 +9, shijiazhuang, blue, 0, 1575129673000, 
20, 12.276544 +9, shijiazhuang, blue, 0, 1575129674000, 26, 15.828786 +9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420 +9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522 +9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531 +9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744 +9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456 +9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136 +9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523 +9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873 +9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768 +9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610 +9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299 +9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126 +9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302 +9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350 +9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790 +9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568 +9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267 +9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092 +9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021 +9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909 +9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524 +9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127 +9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012 +9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551 +9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868 \ No newline at end of file diff --git a/importSampleData/go.mod b/importSampleData/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..fa1d978e597b3eb5b9f35e45f599d5a0f97ff267 --- /dev/null +++ b/importSampleData/go.mod @@ -0,0 +1,8 @@ +module github.com/taosdata/TDengine/importSampleData + +go 1.13 + +require ( + github.com/pelletier/go-toml v1.9.0 // indirect + github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect +) diff --git a/importSampleData/import/import_config.go b/importSampleData/import/import_config.go index e7942cc5050ae369afe896d0f46a0e242fb7e8f6..fdaeeab7da43968f3484c193e6791cc1e45634d7 100644 --- a/importSampleData/import/import_config.go +++ b/importSampleData/import/import_config.go @@ -14,23 +14,23 @@ var ( once sync.Once ) -// Config inclue all scene import config +// Config include all scene import config type Config struct { UserCases map[string]CaseConfig } // CaseConfig include the sample data config and tdengine config type CaseConfig struct { - Format string - FilePath string - Separator string - Stname string - SubTableName string - Timestamp string - TimestampType string - TimestampTypeFormat string - Tags []FieldInfo - Fields []FieldInfo + Format string + FilePath string + Separator string + StName string + SubTableName string + Timestamp string + TimestampType string + TimestampTypeFormat string + Tags []FieldInfo + Fields []FieldInfo } // FieldInfo is field or tag info diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 83b70ed9f89e54ea336a4d066ab3d32bcfdd9c8a..d3bd7510a339c7386cdf83ce5806c2e3ad63db8e 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -64,7 +64,7 @@ # monitorInterval 30 # number of seconds allowed for a dnode to be offline, for cluster only -# offlineThreshold 8640000 +# offlineThreshold 864000 # RPC re-try timer, millisecond # rpcTimer 300 diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 65bf5447e24451efb7de7a8d789c472c9fcfae12..31343ed293c2617a56da57e3893a36b5d3289faf 100644 --- a/snap/snapcraft.yaml +++ 
b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.19.0' +version: '2.0.20.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.19.0 + - usr/lib/libtaos.so.2.0.20.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h index 581cd37cbd53cb87847fc5a13c88b03eb797d93a..143922bb1fb6d6e10a157de5d90af2da5e221f76 100644 --- a/src/client/inc/tscLocalMerge.h +++ b/src/client/inc/tscLocalMerge.h @@ -44,23 +44,15 @@ typedef struct SLocalMerger { int32_t numOfCompleted; int32_t numOfVnode; SLoserTreeInfo * pLoserTree; - char * prevRowOfInput; tFilePage * pResultBuf; int32_t nResultBufSize; tFilePage * pTempBuffer; struct SQLFunctionCtx *pCtx; int32_t rowSize; // size of each intermediate result. - bool hasPrevRow; // cannot be released - bool hasUnprocessedRow; tOrderDescriptor * pDesc; SColumnModel * resColModel; SColumnModel* finalModel; tExtMemBuffer ** pExtMemBuffer; // disk-based buffer - SFillInfo* pFillInfo; // interpolation support structure - char* pFinalRes; // result data after interpo - tFilePage* discardData; - bool discard; - int32_t offset; // limit offset value bool orderPrjOnSTable; // projection query on stable } SLocalMerger; @@ -94,7 +86,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde void tscDestroyLocalMerger(SSqlObj *pSql); -int32_t tscDoLocalMerge(SSqlObj *pSql); +//int32_t tscDoLocalMerge(SSqlObj *pSql); #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0eda49b1f40876f695594e80874c60407e1ca45c..922075433099956236832ddb88f8b361054305d2 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -85,15 +85,13 @@ typedef struct SMergeTsCtx { int8_t compared; }SMergeTsCtx; - typedef struct SVgroupTableInfo { SVgroupInfo vgInfo; - SArray* itemList; //SArray + SArray *itemList; // SArray } SVgroupTableInfo; -static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { +static FORCE_INLINE SQueryInfo* tscGetQueryInfo(SSqlCmd* pCmd, int32_t subClauseIndex) { assert(pCmd != NULL && subClauseIndex >= 0); - if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) { return NULL; } @@ -101,6 +99,8 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub return pCmd->pQueryInfo[subClauseIndex]; } +SQueryInfo* tscGetActiveQueryInfo(SSqlCmd* pCmd); + int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks); void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta); void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf); @@ -129,7 +129,13 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); bool tscIsTWAQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo); -bool tscIsTopbotQuery(SQueryInfo* pQueryInfo); +bool tscIsTopBotQuery(SQueryInfo* pQueryInfo); +bool hasTagValOutput(SQueryInfo* pQueryInfo); +bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); +bool isStabledev(SQueryInfo* pQueryInfo); +bool isTsCompQuery(SQueryInfo* pQueryInfo); +bool isSimpleAggregate(SQueryInfo* pQueryInfo); +bool isBlockDistQuery(SQueryInfo* pQueryInfo); int32_t tscGetTopbotQueryParam(SQueryInfo* 
pQueryInfo); bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex); @@ -143,7 +149,7 @@ bool tscQueryTags(SQueryInfo* pQueryInfo); bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex); bool tscQueryBlockInfo(SQueryInfo* pQueryInfo); -SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, +SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType); int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql); @@ -174,27 +180,29 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) int32_t tscGetResRowLength(SArray* pExprList); -SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); -SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); -SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, +SExprInfo* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo); -void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex); +void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid); -SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index); +SExprInfo* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index); int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy); +void tscSqlExprAssign(SExprInfo* dst, const SExprInfo* src); void tscSqlExprInfoDestroy(SArray* pExprInfo); SColumn* tscColumnClone(const SColumn* src); -bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex); -SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex); -SArray* tscColumnListClone(const SArray* src, int16_t tableIndex); +bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid); +SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema); void tscColumnListDestroy(SArray* pColList); +void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo); + void tscDequoteAndTrimToken(SStrToken* pToken); int32_t tscValidateName(SStrToken* pToken); @@ -216,8 +224,11 @@ bool tscShouldBeFreed(SSqlObj* pSql); STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex); STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex); -SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex); -SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex); +void tscInitQueryInfo(SQueryInfo* pQueryInfo); +void tscClearSubqueryInfo(SSqlCmd* pCmd); +int32_t tscAddQueryInfo(SSqlCmd *pCmd); +SQueryInfo *tscGetQueryInfo(SSqlCmd* pCmd, int32_t subClauseIndex); +SQueryInfo *tscGetQueryInfoS(SSqlCmd *pCmd, int32_t subClauseIndex); void 
tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo); @@ -225,11 +236,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables); STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo); -int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); - -void tscInitQueryInfo(SQueryInfo* pQueryInfo); -void tscClearSubqueryInfo(SSqlCmd* pCmd); void tscFreeVgroupTableInfo(SArray* pVgroupTables); SArray* tscVgroupTableInfoDup(SArray* pVgroupTables); void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index); @@ -241,6 +248,8 @@ int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool creat void tscResetForNextRetrieve(SSqlRes* pRes); void tscDoQuery(SSqlObj* pSql); +void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo); +void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo); @@ -274,7 +283,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex); int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid); int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId); -void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); +void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex); bool hasMoreVnodesToTry(SSqlObj *pSql); bool hasMoreClauseToTry(SSqlObj* pSql); @@ -299,7 +308,10 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta); uint32_t tscGetTableMetaMaxSize(); int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name); STableMeta* tscTableMetaDup(STableMeta* pTableMeta); +int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr); +void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema); +void* createQueryInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage); void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c91943e232864ccba69150325bb12c08eb5dee5d..ec3b0c44213f728fcb56236fde282ed9fce50cbf 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -35,6 +35,7 @@ extern "C" { #include "qExecutor.h" #include "qSqlparser.h" #include "qTsbuf.h" +#include "qUtil.h" #include "tcmdtype.h" // forward declaration @@ -96,56 +97,29 @@ typedef struct STableMetaInfo { SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; - typedef struct SColumnIndex { int16_t tableIndex; int16_t columnIndex; } SColumnIndex; - -typedef struct SFieldInfo { - int16_t numOfOutput; // number of column in result - TAOS_FIELD* final; - SArray *internalField; // SArray -} SFieldInfo; - typedef struct SColumn { - SColumnIndex colIndex; - int32_t numOfFilters; - SColumnFilterInfo *filterInfo; + uint64_t tableUid; + int32_t columnIndex; + SColumnInfo info; } SColumn; -/* the structure for sql function in select clause */ -typedef struct SSqlExpr { - char aliasName[TSDB_COL_NAME_LEN]; // as aliasName - SColIndex colInfo; - uint64_t uid; // refactor use the pointer - int16_t functionId; // function id in aAgg array - int16_t resType; // return value type - int16_t resBytes; // length of return value - int32_t interBytes; // inter result buffer size - int16_t numOfParams; // argument value of each function - tVariant param[3]; // 
parameters are not more than 3 - int32_t offset; // sub result column value of arithmetic expression. - int16_t resColId; // result column id - SColumn *pFilter; // expr filter -} SSqlExpr; - -typedef struct SExprFilter { - tSqlExpr *pExpr; //used for having parse - SSqlExpr *pSqlExpr; - SArray *fp; - SColumn *pFilters; //having filter info -}SExprFilter; - typedef struct SInternalField { TAOS_FIELD field; bool visible; - SExprInfo *pArithExprInfo; - SSqlExpr *pSqlExpr; - SExprFilter *pFieldFilters; + SExprInfo *pExpr; } SInternalField; +typedef struct SFieldInfo { + int16_t numOfOutput; // number of column in result + TAOS_FIELD* final; + SArray *internalField; // SArray +} SFieldInfo; + typedef struct SCond { uint64_t uid; int32_t len; // length of tag query condition data @@ -160,7 +134,7 @@ typedef struct SJoinNode { } SJoinNode; typedef struct SJoinInfo { - bool hasJoin; + bool hasJoin; SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM]; } SJoinInfo; @@ -229,13 +203,14 @@ typedef struct SQueryInfo { SInterval interval; // tumble time window SSessionWindow sessionWindow; // session time window - SSqlGroupbyExpr groupbyExpr; // group by tags info + SSqlGroupbyExpr groupbyExpr; // groupby tags info SArray * colList; // SArray SFieldInfo fieldsInfo; - SArray * exprList; // SArray + SArray * exprList; // SArray SLimitVal limit; SLimitVal slimit; STagCond tagCond; + SOrderVal order; int16_t fillType; // final result fill type int16_t numOfTables; @@ -254,7 +229,15 @@ typedef struct SQueryInfo { int32_t round; // 0/1/.... int32_t bufLen; char* buf; - int32_t havingFieldNum; + SQInfo* pQInfo; // global merge operator + SArray* pDSOperator; // data source operator + SArray* pPhyOperator; // physical query execution plan + SQueryAttr* pQueryAttr; // query object + + struct SQueryInfo *sibling; // sibling + SArray *pUpstream; // SArray + struct SQueryInfo *pDownstream; + int32_t havingFieldNum; } SQueryInfo; typedef struct { @@ -269,8 +252,6 @@ typedef struct { }; uint32_t insertType; // TODO remove it - int32_t clauseIndex; // index of multiple subclause query - char * curSql; // current sql, resume position of sql after parsing paused int8_t parseFinished; char reserve2[3]; // fix bus error on arm32 @@ -280,22 +261,26 @@ typedef struct { uint32_t allocSize; char * payload; int32_t payloadLen; + SQueryInfo **pQueryInfo; int32_t numOfClause; + int32_t clauseIndex; // index of multiple subclause query + SQueryInfo *active; // current active query info + int32_t batchSize; // for parameter ('?') binding and batch processing int32_t numOfParams; int8_t dataSourceType; // load data from file or not - char reserve4[3]; // fix bus error on arm32 + char reserve4[3]; // fix bus error on arm32 int8_t submitSchema; // submit block is built with table schema - char reserve5[3]; // fix bus error on arm32 + char reserve5[3]; // fix bus error on arm32 STagData tagData; // NOTE: pTagData->data is used as a variant length array SName **pTableNameList; // all involved tableMeta list of current insert sql statement. int32_t numOfTables; SHashObj *pTableBlockHashList; // data block for each table - SArray *pDataBlocks; // SArray. Merged submit block for each vgroup + SArray *pDataBlocks; // SArray. 
Merged submit block for each vgroup } SSqlCmd; typedef struct SResRec { @@ -438,7 +423,7 @@ void tscInitMsgsFp(); int tsParseSql(SSqlObj *pSql, bool initial); void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet); -int tscProcessSql(SSqlObj *pSql); +int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo); int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex); void tscAsyncResultOnError(SSqlObj *pSql); @@ -453,6 +438,9 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo); int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo); +void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock); + +void handleDownstreamOperator(SSqlRes* pRes, SQueryInfo* pQueryInfo); void destroyTableNameList(SSqlCmd* pCmd); void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta); @@ -500,47 +488,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) { - SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex); - - int32_t type = pInfo->field.type; - int32_t bytes = pInfo->field.bytes; - - char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row); - UNUSED(pData); - -// user defined constant value output columns - if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { - if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { - pData = pInfo->pSqlExpr->param[1].pz; - pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen; - pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData; - } else { - assert(bytes == tDataTypes[type].bytes); - - pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64; - pRes->length[columnIndex] = bytes; - } - } else { - if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { - int32_t realLen = varDataLen(pData); - assert(realLen <= bytes - VARSTR_HEADER_SIZE); - - pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : (unsigned char*)((tstr *)pData)->data; - if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor - *(pData + realLen + VARSTR_HEADER_SIZE) = 0; - } - - pRes->length[columnIndex] = realLen; - } else { - assert(bytes == tDataTypes[type].bytes); - - pRes->tsrow[columnIndex] = isNull(pData, type) ? 
NULL : (unsigned char*)pData; - pRes->length[columnIndex] = bytes; - } - } -} - extern int32_t sentinel; extern SHashObj *tscVgroupMap; extern SHashObj *tscTableMetaInfo; diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 582bd6bac03ce0049c7c22fe58fa4fa6eb8c69fb..b3060e2c820d7bbb405bf6e96a4bf8af8ed0ec55 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -49,6 +49,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset (JNIEnv *, jclass); +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getResultTimePrecision + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecision + (JNIEnv *, jobject, jlong, jlong); + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: connectImp diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 56e155311e60dd0a30d3f6b5dce99c9d4c76ec7f..7447e36ac9cf1074db0c62077be07b6a41a99256 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -671,3 +671,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) { return (*env)->NewStringUTF(env, (const char *)tsCharset); } + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecision(JNIEnv *env, jobject jobj, jlong con, + jlong res) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_RES *result = (TAOS_RES *)res; + if (result == NULL) { + jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); + return JNI_RESULT_SET_NULL; + } + + return taos_result_precision(result); +} \ No newline at end of file diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 0cfcff3a9869cecb318ca293fbb9e165924a1c07..09b31e4b19c095cb712a4c0f54bb42db34b9949c 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para pSql->sqlstr = calloc(1, sqlLen + 1); if (pSql->sqlstr == NULL) { - tscError("%p failed to malloc sql string buffer", pSql); + tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY; tscAsyncResultOnError(pSql); return; @@ -57,7 +57,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen); - tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); + tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); pCmd->curSql = pSql->sqlstr; int32_t code = tsParseSql(pSql, true); @@ -69,7 +69,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para return; } - tscDoQuery(pSql); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); + executeQuery(pSql, pQueryInfo); } // TODO return the correct error code to client in tscQueueAsyncError @@ -80,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { 
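/*
 * The getResultTimePrecision entry point added in the TSDBJNIConnector changes above
 * is a thin wrapper over the public C API taos_result_precision(). A minimal
 * caller-side sketch of how the returned value can be interpreted; the taos.h include
 * and the 0 = millisecond / 1 = microsecond mapping are assumptions about the
 * application environment, not part of this patch.
 */
#include <stdio.h>
#include <taos.h>   /* public TDengine client header (assumed available to the caller) */

static void printResultPrecision(TAOS_RES *res) {
  int precision = taos_result_precision(res);   /* same call the JNI wrapper forwards to */
  const char *unit = (precision == 1) ? "us" : "ms";   /* assumed precision code mapping */
  printf("timestamp precision of this result set: %s\n", unit);
}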
- tscError("bug!!! pObj:%p", pObj); + tscError("pObj:%p is NULL or freed", pObj); terrno = TSDB_CODE_TSC_DISCONNECTED; tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED); return NULL; @@ -179,7 +180,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { tscFetchDatablockForSubquery(pSql); } else { - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } } @@ -193,8 +194,8 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy); } -void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { - SSqlObj *pSql = (SSqlObj *)taosa; +void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) { + SSqlObj *pSql = (SSqlObj *)tres; if (pSql == NULL || pSql->signature != pSql) { tscError("sql object is NULL"); tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED); @@ -206,18 +207,16 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { // user-defined callback function is stored in fetchFp pSql->fetchFp = fp; - pSql->fp = tscAsyncFetchRowsProxy; + pSql->fp = tscAsyncFetchRowsProxy; + pSql->param = param; if (pRes->qId == 0) { - tscError("qhandle is NULL"); + tscError("qhandle is invalid"); pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE; - pSql->param = param; - tscAsyncResultOnError(pSql); return; } - pSql->param = param; tscResetForNextRetrieve(pRes); // handle the sub queries of join query @@ -255,8 +254,9 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - - tscProcessSql(pSql); + + SQueryInfo* pQueryInfo1 = tscGetActiveQueryInfo(&pSql->cmd); + tscBuildAndSendRequest(pSql, pQueryInfo1); } } @@ -283,12 +283,12 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { static void tscAsyncResultCallback(SSchedMsg *pMsg) { SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pMsg->ahandle); if (pSql == NULL || pSql->signature != pSql) { - tscDebug("%p SqlObj is freed, not add into queue async res", pSql); + tscDebug("%p SqlObj is freed, not add into queue async res", pMsg->ahandle); return; } assert(pSql->res.code != TSDB_CODE_SUCCESS); - tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code)); + tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code)); SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ @@ -323,7 +323,7 @@ static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableM SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + SSqlExpr *pExpr = &(tscSqlExprGet(pQueryInfo, i)->base); pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; if (pExpr->colInfo.colIndex >= 0) { @@ -344,7 +344,7 @@ static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableM // validate the table columns information for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) { SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i); - if (pCol->colIndex.columnIndex >= numOfCols) { + if (pCol->columnIndex >= numOfCols) { return pSql->retryReason; } } @@ -368,17 +368,17 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj *sub = (SSqlObj*) res; const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? 
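/*
 * taos_fetch_rows_a() above now records the user callback in fetchFp and the user
 * param before any early-return path, then resolves the active SQueryInfo and drives
 * the request through tscBuildAndSendRequest(). A minimal application-side sketch of
 * the async query/fetch loop these entry points serve; the callback names and the
 * per-row loop are illustrative assumptions, not part of the patch.
 */
static void fetch_block_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) {                  /* no more rows, or an error code delivered via numOfRows */
    taos_free_result(res);
    return;
  }
  for (int i = 0; i < numOfRows; ++i) {
    TAOS_ROW row = taos_fetch_row(res);  /* consume the block that was just delivered */
    (void)row;
  }
  taos_fetch_rows_a(res, fetch_block_cb, param);   /* request the next block */
}

static void query_done_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0) {                       /* 0 == TSDB_CODE_SUCCESS */
    taos_fetch_rows_a(res, fetch_block_cb, param);
  } else {
    taos_free_result(res);
  }
}
/* usage: taos_query_a(conn, "select * from st", query_done_cb, NULL); */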
"vgroup-list":"table-meta"; if (code != TSDB_CODE_SUCCESS) { - tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); + tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code)); goto _error; } - tscDebug("%p get %s successfully", pSql, msg); + tscDebug("0x%"PRIx64" get %s successfully", pSql->self, msg); if (pSql->pStream == NULL) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); // check if it is a sub-query of super table query first, if true, enter another routine if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { - tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql); + tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send the corresponding query", pSql->self); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -396,13 +396,13 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - // tscProcessSql can add error into async res - tscProcessSql(pSql); + // tscBuildAndSendRequest can add error into async res + tscBuildAndSendRequest(pSql, NULL); taosReleaseRef(tscObjRef, pSql->self); return; } else { // continue to process normal async query if (pCmd->parseFinished) { - tscDebug("%p update local table meta, continue to process sql and send corresponding query", pSql); + tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send corresponding query", pSql->self); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); @@ -416,7 +416,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { assert(pCmd->command != TSDB_SQL_INSERT); if (pCmd->command == TSDB_SQL_SELECT) { - tscDebug("%p redo parse sql string and proceed", pSql); + tscDebug("0x%"PRIx64" redo parse sql string and proceed", pSql->self); pCmd->parseFinished = false; tscResetSqlCmd(pCmd, true); @@ -428,15 +428,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } else { // in all other cases, simple retry - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } taosReleaseRef(tscObjRef, pSql->self); return; } else { - tscDebug("%p continue parse sql after get table meta", pSql); + tscDebug("0x%"PRIx64" continue parse sql after get table meta", pSql->self); code = tsParseSql(pSql, false); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -447,21 +447,29 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) { - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); + STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { taosReleaseRef(tscObjRef, pSql->self); return; } else { - assert(code == TSDB_CODE_SUCCESS); + assert(code == TSDB_CODE_SUCCESS); } (*pSql->fp)(pSql->param, pSql, code); - taosReleaseRef(tscObjRef, pSql->self); - return; + } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { + if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { + tscImportDataFromFile(pSql); + } else { + tscHandleMultivnodeInsert(pSql); + } + } else { + 
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, pCmd->clauseIndex); + executeQuery(pSql, pQueryInfo1); } - // proceed to invoke the tscDoQuery(); + taosReleaseRef(tscObjRef, pSql->self); + return; } } @@ -486,7 +494,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } } - tscDebug("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command); + tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pSql->cmd.command); if (!pSql->cmd.parseFinished) { tsParseSql(pSql, false); } @@ -498,7 +506,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { return; } - tscDoQuery(pSql); +// tscDoQuery(pSql); taosReleaseRef(tscObjRef, pSql->self); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 188ba29a97c244ce5c5027c459fabc303ed85c0f..a7882ffa6186224e30cbbdfb84ff5300131e479e 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -53,7 +53,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSqlRes *pRes = &pSql->res; // one column for each row - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; @@ -154,14 +154,14 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pSql->cmd.numOfCols = numOfCols; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); pQueryInfo->order.order = TSDB_ORDER_ASC; TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE}; tstrncpy(f.name, "Field", sizeof(f.name)); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); @@ -171,7 +171,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Type", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), -1000, typeColLength, false); rowLen += typeColLength + VARSTR_HEADER_SIZE; @@ -181,7 +181,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Length", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), -1000, sizeof(int32_t), false); rowLen += sizeof(int32_t); @@ -191,7 +191,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Note", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, 
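/*
 * In tscTableMetaCallBack() above, the old unconditional fall-through to tscDoQuery()
 * is replaced by an explicit dispatch on the statement kind once the table meta is
 * available. A condensed view of the added branches, paraphrasing the hunk (error
 * handling and the tscObjRef release are omitted); this is a restatement, not new logic.
 */
static void dispatchAfterTableMeta(SSqlObj *pSql, SSqlCmd *pCmd, SQueryInfo *pQueryInfo) {
  if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) {
    (*pSql->fp)(pSql->param, pSql, TSDB_CODE_SUCCESS);   /* parameter-bound insert: hand control back */
  } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
    if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
      tscImportDataFromFile(pSql);                        /* bulk load rows from a file */
    } else {
      tscHandleMultivnodeInsert(pSql);                    /* regular insert across vnodes */
    }
  } else {
    executeQuery(pSql, pQueryInfo);                       /* query path: build and run the operator plan */
  }
}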
TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), -1000, noteColLength, false); rowLen += noteColLength + VARSTR_HEADER_SIZE; @@ -199,7 +199,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, } static int32_t tscProcessDescribeTable(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); assert(tscGetMetaInfo(pQueryInfo, 0)->pTableMeta != NULL); @@ -389,7 +389,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const SColumnIndex index = {0}; pSql->cmd.numOfCols = 2; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); pQueryInfo->order.order = TSDB_ORDER_ASC; TAOS_FIELD f; @@ -404,7 +404,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); rowLen += f.bytes; @@ -417,7 +417,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false); rowLen += ddlLen + VARSTR_HEADER_SIZE; @@ -427,7 +427,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) { SSqlRes *pRes = &pSql->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); int32_t numOfRows = 1; if (strlen(ddl) == 0) { @@ -444,7 +444,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c return 0; } static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result); tscFieldInfoUpdateOffset(pQueryInfo); @@ -552,7 +552,7 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) { return TSDB_CODE_SUCCESS; } static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; @@ -606,7 +606,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch } static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = 
tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; @@ -633,7 +633,7 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, } static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, char *ddl) { char *result = ddl; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; @@ -674,7 +674,7 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, } static int32_t tscProcessShowCreateTable(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); assert(pTableMetaInfo->pTableMeta != NULL); @@ -700,7 +700,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) { } static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -727,7 +727,7 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) { return TSDB_CODE_TSC_ACTION_IN_PROGRESS; } static int32_t tscProcessCurrentUser(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resBytes = TSDB_USER_LEN + TSDB_DATA_TYPE_BINARY; @@ -754,7 +754,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) { extractDBName(pSql->pTscObj->db, db); pthread_mutex_unlock(&pSql->pTscObj->mutex); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; @@ -781,7 +781,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) { static int32_t tscProcessServerVer(SSqlObj *pSql) { const char* v = pSql->pTscObj->sversion; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; @@ -804,7 +804,7 @@ static int32_t tscProcessServerVer(SSqlObj *pSql) { } static int32_t tscProcessClientVer(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; @@ -856,7 +856,7 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) { return pSql->res.code; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); int32_t val = 1; @@ -870,7 +870,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa pCmd->numOfCols = 1; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); pQueryInfo->order.order = TSDB_ORDER_ASC; 
tscFieldInfoClear(&pQueryInfo->fieldsInfo); @@ -882,7 +882,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength); SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0); - pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0); + pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0); memcpy(pRes->data, val, pInfo->field.bytes); } @@ -926,7 +926,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pRes->code = tscProcessServStatus(pSql); } else { pRes->code = TSDB_CODE_TSC_INVALID_SQL; - tscError("%p not support command:%d", pSql, pCmd->command); + tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command); } // keep the code in local variable in order to avoid invalid read in case of async query diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index c296072393eec68f7e753fe4a556c745743d2c50..e4a3ace6b5af128971a5bed21562082602754d97 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -31,6 +31,8 @@ typedef struct SCompareParam { int32_t groupOrderType; } SCompareParam; +bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndex, int32_t index, char **buf); + int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { int32_t pLeftIdx = *(int32_t *)pLeft; int32_t pRightIdx = *(int32_t *)pRight; @@ -57,134 +59,68 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { } } -static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) { - /* - * the fields and offset attributes in pCmd and pModel may be different due to - * merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object. - */ - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); +// todo merge with vnode side function +void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SQLFunctionCtx *pCtx = &pReducer->pCtx[i]; - SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i); - pCtx->pOutput = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity; - pCtx->order = pQueryInfo->order.order; - pCtx->functionId = pExpr->functionId; + pCtx[i].order = pQueryInfo->order.order; + pCtx[i].functionId = pExpr->base.functionId; - // input buffer hold only one point data - int16_t offset = getColumnModelOffset(pDesc->pColumnModel, i); - SSchema *pSchema = getColumnModelSchema(pDesc->pColumnModel, i); - - pCtx->pInput = pReducer->pTempBuffer->data + offset; + pCtx[i].order = pQueryInfo->order.order; + pCtx[i].functionId = pExpr->base.functionId; // input data format comes from pModel - pCtx->inputType = pSchema->type; - pCtx->inputBytes = pSchema->bytes; + pCtx[i].inputType = pSchema[i].type; + pCtx[i].inputBytes = pSchema[i].bytes; - // output data format yet comes from pCmd. 
- pCtx->outputBytes = pExpr->resBytes; - pCtx->outputType = pExpr->resType; + pCtx[i].outputBytes = pExpr->base.resBytes; + pCtx[i].outputType = pExpr->base.resType; - pCtx->size = 1; - pCtx->hasNull = true; - pCtx->currentStage = MERGE_STAGE; + // input buffer hold only one point data + pCtx[i].size = 1; + pCtx[i].hasNull = true; + pCtx[i].currentStage = MERGE_STAGE; // for top/bottom function, the output of timestamp is the first column - int32_t functionId = pExpr->functionId; + int32_t functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx->ptsOutputBuf = pReducer->pCtx[0].pOutput; - pCtx->param[2].i64 = pQueryInfo->order.order; - pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; - pCtx->param[1].i64 = pQueryInfo->order.orderColId; + pCtx[i].ptsOutputBuf = pCtx[0].pOutput; + pCtx[i].param[2].i64 = pQueryInfo->order.order; + pCtx[i].param[2].nType = TSDB_DATA_TYPE_BIGINT; + pCtx[i].param[1].i64 = pQueryInfo->order.orderColId; + pCtx[i].param[0].i64 = pExpr->base.param[0].i64; // top/bot parameter } else if (functionId == TSDB_FUNC_APERCT) { - pCtx->param[0].i64 = pExpr->param[0].i64; - pCtx->param[0].nType = pExpr->param[0].nType; + pCtx[i].param[0].i64 = pExpr->base.param[0].i64; + pCtx[i].param[0].nType = pExpr->base.param[0].nType; } else if (functionId == TSDB_FUNC_BLKINFO) { - pCtx->param[0].i64 = pExpr->param[0].i64; - pCtx->param[0].nType = pExpr->param[0].nType; - pCtx->numOfParams = 1; - } - - pCtx->interBufBytes = pExpr->interBytes; - pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo)); - pCtx->stableQuery = true; - } - - int16_t n = 0; - int16_t tagLen = 0; - SQLFunctionCtx **pTagCtx = calloc(pQueryInfo->fieldsInfo.numOfOutput, POINTER_BYTES); - - SQLFunctionCtx *pCtx = NULL; - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) { - tagLen += pExpr->resBytes; - pTagCtx[n++] = &pReducer->pCtx[i]; - } else if ((aAggs[pExpr->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { - pCtx = &pReducer->pCtx[i]; - } - } - - if (n == 0 || pCtx == NULL) { - free(pTagCtx); - } else { - pCtx->tagInfo.pTagCtxList = pTagCtx; - pCtx->tagInfo.numOfTagCols = n; - pCtx->tagInfo.tagsLen = tagLen; - } -} - -static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { - int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); - int32_t offset = 0; - - SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); - for(int32_t i = 0; i < numOfCols; ++i) { - SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); - - if (pIField->pArithExprInfo == NULL) { - SSqlExpr* pExpr = pIField->pSqlExpr; - - pFillCol[i].col.bytes = pExpr->resBytes; - pFillCol[i].col.type = (int8_t)pExpr->resType; - pFillCol[i].col.colId = pExpr->colInfo.colId; - pFillCol[i].flag = pExpr->colInfo.flag; - pFillCol[i].col.offset = offset; - pFillCol[i].functionId = pExpr->functionId; - pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; - } else { - pFillCol[i].col.bytes = pIField->field.bytes; - pFillCol[i].col.type = (int8_t)pIField->field.type; - pFillCol[i].col.colId = -100; - pFillCol[i].flag = TSDB_COL_NORMAL; - pFillCol[i].col.offset = offset; - pFillCol[i].functionId = -1; - pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; + pCtx[i].param[0].i64 = pExpr->base.param[0].i64; + pCtx[i].param[0].nType = pExpr->base.param[0].nType; 
+ pCtx[i].numOfParams = 1; } - offset += pFillCol[i].col.bytes; + pCtx[i].interBufBytes = pExpr->base.interBytes; + pCtx[i].stableQuery = true; } - - return pFillCol; } void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) { + SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj *pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; if (pMemBuffer == NULL) { tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); - tscError("%p pMemBuffer is NULL", pMemBuffer); + tscError("pMemBuffer:%p is NULL", pMemBuffer); pRes->code = TSDB_CODE_TSC_APP_ERROR; return; } if (pDesc->pColumnModel == NULL) { tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); - tscError("%p no local buffer or intermediate result format model", pSql); + tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self); pRes->code = TSDB_CODE_TSC_APP_ERROR; return; } @@ -193,7 +129,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde for (int32_t i = 0; i < numOfBuffer; ++i) { int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength; if (len == 0) { - tscDebug("%p no data retrieved from orderOfVnode:%d", pSql, i + 1); + tscDebug("0x%"PRIx64" no data retrieved from orderOfVnode:%d", pSql->self, i + 1); continue; } @@ -203,12 +139,12 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde if (numOfFlush == 0 || numOfBuffer == 0) { tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty - tscDebug("%p retrieved no data", pSql); + tscDebug("0x%"PRIx64" retrieved no data", pSql->self); return; } if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) { - tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity, + tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity, pMemBuffer[0]->pageSize); tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); @@ -218,24 +154,24 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush; - SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size); - if (pReducer == NULL) { - tscError("%p failed to create local merge structure, out of memory", pSql); + SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size); + if (pMerger == NULL) { + tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self); tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; } - pReducer->pExtMemBuffer = pMemBuffer; - pReducer->pLocalDataSrc = (SLocalDataSource **)&pReducer[1]; - assert(pReducer->pLocalDataSrc != NULL); + pMerger->pExtMemBuffer = pMemBuffer; + pMerger->pLocalDataSrc = (SLocalDataSource **)&pMerger[1]; + assert(pMerger->pLocalDataSrc != NULL); - pReducer->numOfBuffer = numOfFlush; - pReducer->numOfVnode = numOfBuffer; + pMerger->numOfBuffer = numOfFlush; + pMerger->numOfVnode = numOfBuffer; - pReducer->pDesc = pDesc; - tscDebug("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer); + pMerger->pDesc = pDesc; + tscDebug("0x%"PRIx64" the number of merged leaves is: %d", pSql->self, 
pMerger->numOfBuffer); int32_t idx = 0; for (int32_t i = 0; i < numOfBuffer; ++i) { @@ -244,13 +180,13 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde for (int32_t j = 0; j < numOfFlushoutInFile; ++j) { SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize); if (ds == NULL) { - tscError("%p failed to create merge structure", pSql); + tscError("0x%"PRIx64" failed to create merge structure", pSql->self); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tfree(pReducer); + tfree(pMerger); return; } - pReducer->pLocalDataSrc[idx] = ds; + pMerger->pLocalDataSrc[idx] = ds; ds->pMemBuffer = pMemBuffer[i]; ds->flushoutIdx = j; @@ -258,12 +194,12 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde ds->pageId = 0; ds->rowIdx = 0; - tscDebug("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSql, i + 1, idx + 1); + tscDebug("0x%"PRIx64" load data from disk into memory, orderOfVnode:%d, total:%d", pSql->self, i + 1, idx + 1); tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0); #ifdef _DEBUG_VIEW printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.num); SSrcColumnInfo colInfo[256] = {0}; - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo * pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); tscGetSrcColumnInfo(colInfo, pQueryInfo); @@ -272,7 +208,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde #endif if (ds->filePage.num == 0) { // no data in this flush, the index does not increase - tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx); + tscDebug("0x%"PRIx64" flush data is empty, ignore %d flush record", pSql->self, idx); tfree(ds); continue; } @@ -283,99 +219,92 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde // no data actually, no need to merge result. if (idx == 0) { - tfree(pReducer); + tfree(pMerger); return; } - pReducer->numOfBuffer = idx; + pMerger->numOfBuffer = idx; SCompareParam *param = malloc(sizeof(SCompareParam)); if (param == NULL) { - tfree(pReducer); + tfree(pMerger); return; } - param->pLocalData = pReducer->pLocalDataSrc; - param->pDesc = pReducer->pDesc; - param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + param->pLocalData = pMerger->pLocalDataSrc; + param->pDesc = pMerger->pDesc; + param->num = pMerger->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); param->groupOrderType = pQueryInfo->groupbyExpr.orderType; - pReducer->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); + pMerger->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); - pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); - if (pReducer->pLoserTree == NULL || pRes->code != 0) { + pRes->code = tLoserTreeCreate(&pMerger->pLoserTree, pMerger->numOfBuffer, param, treeComparator); + if (pMerger->pLoserTree == NULL || pRes->code != 0) { tfree(param); - tfree(pReducer); + tfree(pMerger); return; } // the input data format follows the old format, but output in a new format. 
// so, all the input must be parsed as old format - pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx)); - pReducer->rowSize = pMemBuffer[0]->nElemSize; + pMerger->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx)); + pMerger->rowSize = pMemBuffer[0]->nElemSize; - tscRestoreFuncForSTableQuery(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo); - if (pReducer->rowSize > pMemBuffer[0]->pageSize) { + if (pMerger->rowSize > pMemBuffer[0]->pageSize) { assert(false); // todo fixed row size is larger than the minimum page size; } - pReducer->hasPrevRow = false; - pReducer->hasUnprocessedRow = false; - - pReducer->prevRowOfInput = (char *)calloc(1, pReducer->rowSize); - // used to keep the latest input row - pReducer->pTempBuffer = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage)); - pReducer->discardData = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage)); - pReducer->discard = false; + pMerger->pTempBuffer = (tFilePage *)calloc(1, pMerger->rowSize + sizeof(tFilePage)); - pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16; - pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage)); + pMerger->nResultBufSize = pMemBuffer[0]->pageSize * 16; + pMerger->pResultBuf = (tFilePage *)calloc(1, pMerger->nResultBufSize + sizeof(tFilePage)); - pReducer->resColModel = finalmodel; - pReducer->resColModel->capacity = pReducer->nResultBufSize; - pReducer->finalModel = pFFModel; + pMerger->resColModel = finalmodel; + pMerger->resColModel->capacity = pMerger->nResultBufSize; + pMerger->finalModel = pFFModel; - int32_t expandFactor = 1; if (finalmodel->rowSize > 0) { - bool topBotQuery = tscIsTopbotQuery(pQueryInfo); - if (topBotQuery) { - expandFactor = tscGetTopbotQueryParam(pQueryInfo); - pReducer->resColModel->capacity /= (finalmodel->rowSize * expandFactor); - pReducer->resColModel->capacity *= expandFactor; - } else { - pReducer->resColModel->capacity /= finalmodel->rowSize; - } + pMerger->resColModel->capacity /= finalmodel->rowSize; } - assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pReducer->rowSize); - - pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity); + assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pMerger->rowSize); - if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || - pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { - tfree(pReducer->pTempBuffer); - tfree(pReducer->discardData); - tfree(pReducer->pResultBuf); - tfree(pReducer->pFinalRes); - tfree(pReducer->prevRowOfInput); - tfree(pReducer->pLoserTree); + if (pMerger->pTempBuffer == NULL || pMerger->pLoserTree == NULL) { + tfree(pMerger->pTempBuffer); + tfree(pMerger->pLoserTree); tfree(param); - tfree(pReducer); + tfree(pMerger); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; } - pReducer->pTempBuffer->num = 0; - + pMerger->pTempBuffer->num = 0; tscCreateResPointerInfo(pRes, pQueryInfo); - tscInitSqlContext(pCmd, pReducer, pDesc); + + SSchema* pschema = calloc(pDesc->pColumnModel->numOfCols, sizeof(SSchema)); + for(int32_t i = 0; i < pDesc->pColumnModel->numOfCols; ++i) { + pschema[i] = pDesc->pColumnModel->pFields[i].field; + } + + tsCreateSQLFunctionCtx(pQueryInfo, pMerger->pCtx, pschema); +// setCtxInputOutputBuffer(pQueryInfo, pMerger->pCtx, pMerger, pDesc); + + tfree(pschema); + + int32_t maxBufSize = 0; + for (int32_t k = 0; k < tscSqlExprNumOfExprs(pQueryInfo); ++k) { 
+ SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, k); + if (maxBufSize < pExpr->base.resBytes && pExpr->base.functionId == TSDB_FUNC_TAG) { + maxBufSize = pExpr->base.resBytes; + } + } // we change the capacity of schema to denote that there is only one row in temp buffer - pReducer->pDesc->pColumnModel->capacity = 1; + pMerger->pDesc->pColumnModel->capacity = 1; // restore the limitation value at the last stage if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { @@ -383,23 +312,22 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde pQueryInfo->limit.offset = pQueryInfo->prjOffset; } - pReducer->offset = (int32_t)pQueryInfo->limit.offset; - - pRes->pLocalMerger = pReducer; + pRes->pLocalMerger = pMerger; pRes->numOfGroups = 0; - STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); - STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); +// STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); +// STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey; - int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision); +// TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey; +// int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision); - if (pQueryInfo->fillType != TSDB_FILL_NONE) { - SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); - pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, - 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, - tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); - } +// if (pQueryInfo->fillType != TSDB_FILL_NONE) { +// SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); +// pMerger->pFillInfo = +// taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, 4096, +// (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, +// pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); +// } } static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, @@ -500,62 +428,34 @@ void tscDestroyLocalMerger(SSqlObj *pSql) { return; } - SSqlCmd * pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - // there is no more result, so we release all allocated resource SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL); - if (pLocalMerge != NULL) { - pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo); - - if (pLocalMerge->pCtx != NULL) { - int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t i = 0; i < numOfExprs; ++i) { - SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i]; - - tVariantDestroy(&pCtx->tag); - tfree(pCtx->resultInfo); - - if (pCtx->tagInfo.pTagCtxList != NULL) { - tfree(pCtx->tagInfo.pTagCtxList); - } - } - - tfree(pLocalMerge->pCtx); - } - - tfree(pLocalMerge->prevRowOfInput); + tfree(pLocalMerge->pResultBuf); + tfree(pLocalMerge->pCtx); - tfree(pLocalMerge->pTempBuffer); - tfree(pLocalMerge->pResultBuf); - - if (pLocalMerge->pLoserTree) { - tfree(pLocalMerge->pLoserTree->param); - tfree(pLocalMerge->pLoserTree); - } + if 
(pLocalMerge->pLoserTree) { + tfree(pLocalMerge->pLoserTree->param); + tfree(pLocalMerge->pLoserTree); + } - tfree(pLocalMerge->pFinalRes); - tfree(pLocalMerge->discardData); + tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, + pLocalMerge->finalModel, pLocalMerge->numOfVnode); + for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) { + tfree(pLocalMerge->pLocalDataSrc[i]); + } - tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel, - pLocalMerge->numOfVnode); - for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) { - tfree(pLocalMerge->pLocalDataSrc[i]); - } + pLocalMerge->numOfBuffer = 0; + pLocalMerge->numOfCompleted = 0; + tfree(pLocalMerge->pTempBuffer); - pLocalMerge->numOfBuffer = 0; - pLocalMerge->numOfCompleted = 0; - free(pLocalMerge); - } else { - tscDebug("%p already freed or another free function is invoked", pSql); - } + free(pLocalMerge); - tscDebug("%p free local reducer finished", pSql); + tscDebug("0x%"PRIx64" free local reducer finished", pSql->self); } static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, SColumnModel *pModel) { int32_t numOfGroupByCols = 0; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(pCmd); if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols; @@ -575,11 +475,19 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); - int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols; // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { - orderColIndexList[i] = startCols++; + SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i); + for(int32_t j = 0; j < numOfInternalOutput; ++j) { + SExprInfo* pExprInfo = tscSqlExprGet(pQueryInfo, j); + + int32_t functionId = pExprInfo->base.functionId; + if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG)) { + orderColIndexList[i] = j; + break; + } + } } if (pQueryInfo->interval.interval != 0) { @@ -596,8 +504,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm } else { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { orderColIndexList[0] = i; } } @@ -617,49 +525,6 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm } } -bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - - // disable merge procedure for column projection query - int16_t functionId = pReducer->pCtx[0].functionId; - if (pReducer->orderPrjOnSTable) { - return true; - } - - if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { - return false; - } - - tOrderDescriptor *pOrderDesc = 
pReducer->pDesc; - SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo; - - // no group by columns, all data belongs to one group - int32_t numOfCols = orderInfo->numOfCols; - if (numOfCols <= 0) { - return true; - } - - if (orderInfo->colIndex[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - /* - * super table interval query - * if the order columns is the primary timestamp, all result data belongs to one group - */ - assert(pQueryInfo->interval.interval > 0); - if (numOfCols == 1) { - return true; - } - } else { // simple group by query - assert(pQueryInfo->interval.interval == 0); - } - - // only one row exists - int32_t index = orderInfo->colIndex[0]; - int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset; - - int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset); - return ret == 0; -} - int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc, SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) { SSqlCmd *pCmd = &pSql->cmd; @@ -669,12 +534,12 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr SColumnModel *pModel = NULL; *pFinalModel = NULL; - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo * pQueryInfo = tscGetActiveQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub); if (*pMemBuffer == NULL) { - tscError("%p failed to allocate memory", pSql); + tscError("0x%"PRIx64" failed to allocate memory", pSql->self); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return pRes->code; } @@ -683,20 +548,20 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size); if (pSchema == NULL) { - tscError("%p failed to allocate memory", pSql); + tscError("0x%"PRIx64" failed to allocate memory", pSql->self); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return pRes->code; } int32_t rlen = 0; for (int32_t i = 0; i < size; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i); - pSchema[i].bytes = pExpr->resBytes; - pSchema[i].type = (int8_t)pExpr->resType; - tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name)); + pSchema[i].bytes = pExpr->base.resBytes; + pSchema[i].type = (int8_t)pExpr->base.resType; + tstrncpy(pSchema[i].name, pExpr->base.aliasName, tListLen(pSchema[i].name)); - rlen += pExpr->resBytes; + rlen += pExpr->base.resBytes; } int32_t capacity = 0; @@ -729,17 +594,17 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr memset(pSchema, 0, sizeof(SSchema) * size); for (int32_t i = 0; i < size; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i); SSchema p1 = {0}; - if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { p1 = *tGetTbnameColumnSchema(); - } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { - p1.bytes = pExpr->resBytes; - p1.type = (uint8_t) pExpr->resType; - tstrncpy(p1.name, pExpr->aliasName, tListLen(p1.name)); + } else if (TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag)) { + p1.bytes = pExpr->base.resBytes; + p1.type = (uint8_t) pExpr->base.resType; + tstrncpy(p1.name, pExpr->base.aliasName, tListLen(p1.name)); } else { - p1 = 
*tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); + p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->base.colInfo.colIndex); } int32_t inter = 0; @@ -748,7 +613,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr // the final result size and type in the same as query on single table. // so here, set the flag to be false; - int32_t functionId = pExpr->functionId; + int32_t functionId = pExpr->base.functionId; if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) { type = pModel->pFields[i].field.type; bytes = pModel->pFields[i].field.bytes; @@ -875,897 +740,567 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn } } -void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) { - // discard following dataset in the same group and reset the interpolation information - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); +//TODO it is not ordered, fix it +static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex, bool* hasPrev) { + int32_t size = (int32_t) taosArrayGetSize(pColumnList); - STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + for(int32_t i = 0; i < size; ++i) { + SColIndex* index = taosArrayGet(pColumnList, i); + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index->colIndex); + assert(index->colId == pColInfo->info.colId); - if (pFillInfo != NULL) { - int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey; - int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision); - - taosResetFillInfo(pFillInfo, revisedSTime); + memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes); } - pLocalMerge->discard = true; - pLocalMerge->discardData->num = 0; - - SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel; - tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1); + (*hasPrev) = true; } -static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) { - assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE); - - tFilePage * pBeforeFillData = pLocalMerge->pResultBuf; - - pRes->data = pLocalMerge->pFinalRes; - pRes->numOfRows = (int32_t) pBeforeFillData->num; +static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t numOfRows) { + if (numOfRows <= 1) { + return ; + } - if (pQueryInfo->limit.offset > 0) { - if (pQueryInfo->limit.offset < pRes->numOfRows) { - int32_t prevSize = (int32_t) pBeforeFillData->num; - tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); + for (int32_t k = 0; k < numOfOutput; ++k) { + if (pCtx[k].functionId != TSDB_FUNC_TAG) { + continue; + } - /* remove the hole in column model */ - tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize); + int32_t inc = numOfRows - 1; // tsdb_func_tag function only produce one row of result + char* src = pCtx[k].pOutput; - pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset; - pQueryInfo->limit.offset = 0; - } else { - pQueryInfo->limit.offset -= pRes->numOfRows; - pRes->numOfRows = 0; + for (int32_t i = 0; i < inc; ++i) { + pCtx[k].pOutput += pCtx[k].outputBytes; + memcpy(pCtx[k].pOutput, src, (size_t)pCtx[k].outputBytes); } } +} - if 
(pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) { - pRes->numOfRows = 0; - pBeforeFillData->num = 0; - pLocalMerge->discard = true; - return; - } - - pRes->numOfRowsGroup += pRes->numOfRows; - - // impose the limitation of output rows on the final result - if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { - int32_t prevSize = (int32_t)pBeforeFillData->num; - int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); - assert(overflow < pRes->numOfRows); - - pRes->numOfRowsGroup = pQueryInfo->limit.limit; - pRes->numOfRows -= overflow; - pBeforeFillData->num -= overflow; - - tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize); +static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) { + SMultiwayMergeInfo* pInfo = pOperator->info; + SQLFunctionCtx* pCtx = pInfo->binfo.pCtx; - // set remain data to be discarded, and reset the interpolation information - savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo); + char** add = calloc(pBlock->info.numOfCols, POINTER_BYTES); + for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + add[i] = pCtx[i].pInput; + pCtx[i].size = 1; } - memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize)); + for(int32_t i = 0; i < pBlock->info.rows; ++i) { + if (pInfo->hasPrev) { + if (needToMergeRv(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) { + for (int32_t j = 0; j < numOfExpr; ++j) { + pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i; + } - pRes->numOfClauseTotal += pRes->numOfRows; - pBeforeFillData->num = 0; -} + for (int32_t j = 0; j < numOfExpr; ++j) { + int32_t functionId = pCtx[j].functionId; + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + continue; + } + aAggs[functionId].mergeFunc(&pCtx[j]); + } + } else { + for(int32_t j = 0; j < numOfExpr; ++j) { // TODO refactor + int32_t functionId = pCtx[j].functionId; + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + continue; + } + aAggs[functionId].xFinalize(&pCtx[j]); + } -/* - * Note: pRes->pLocalMerge may be null, due to the fact that "tscDestroyLocalMerger" is called - * by "interuptHandler" function in shell - */ -static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - - tFilePage *pBeforeFillData = pLocalMerge->pResultBuf; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - SFillInfo *pFillInfo = pLocalMerge->pFillInfo; + int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput); + setTagValueForMultipleRows(pCtx, pOperator->numOfOutput, numOfRows); - // todo extract function - int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? 
pQueryInfo->window.ekey: pQueryInfo->window.skey; + pInfo->binfo.pRes->info.rows += numOfRows; - void** pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput); - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - pResPages[i] = calloc(1, pField->bytes * pLocalMerge->resColModel->capacity); - } + for(int32_t j = 0; j < numOfExpr; ++j) { + pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows); + if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) { + pCtx[j].ptsOutputBuf = pCtx[0].pOutput; + } + } - while (1) { - int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity); + for(int32_t j = 0; j < numOfExpr; ++j) { + aAggs[pCtx[j].functionId].init(&pCtx[j]); + } - if (pQueryInfo->limit.offset < newRows) { - newRows -= pQueryInfo->limit.offset; + for (int32_t j = 0; j < numOfExpr; ++j) { + pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i; + } - if (pQueryInfo->limit.offset > 0) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - memmove(pResPages[i], ((char*)pResPages[i]) + pField->bytes * pQueryInfo->limit.offset, - (size_t)(newRows * pField->bytes)); + for (int32_t j = 0; j < numOfExpr; ++j) { + int32_t functionId = pCtx[j].functionId; + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + continue; + } + aAggs[functionId].mergeFunc(&pCtx[j]); } } - - pRes->data = pLocalMerge->pFinalRes; - pRes->numOfRows = (int32_t) newRows; - - pQueryInfo->limit.offset = 0; - break; } else { - pQueryInfo->limit.offset -= newRows; - pRes->numOfRows = 0; - - if (!taosFillHasMoreResults(pFillInfo)) { - if (!doneOutput) { // reduce procedure has not completed yet, but current results for fill are exhausted - break; - } + for (int32_t j = 0; j < numOfExpr; ++j) { + pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i; + } - // all output in current group are completed - int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity); - if (totalRemainRows <= 0) { - break; + for (int32_t j = 0; j < numOfExpr; ++j) { + int32_t functionId = pCtx[j].functionId; + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + continue; } + aAggs[functionId].mergeFunc(&pCtx[j]); } } - } - if (pRes->numOfRows > 0) { - int32_t currentTotal = (int32_t)(pRes->numOfRowsGroup + pRes->numOfRows); - - if (pQueryInfo->limit.limit >= 0 && currentTotal > pQueryInfo->limit.limit) { - int32_t overflow = (int32_t)(currentTotal - pQueryInfo->limit.limit); - - pRes->numOfRows -= overflow; - assert(pRes->numOfRows >= 0); - - /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo); - } + savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev); + } - int32_t offset = 0; - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i], (size_t)(pField->bytes * pRes->numOfRows)); - offset += pField->bytes; + { + for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + pCtx[i].pInput = add[i]; } - - pRes->numOfRowsGroup += pRes->numOfRows; - pRes->numOfClauseTotal += pRes->numOfRows; } - pBeforeFillData->num = 0; - for 
(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - tfree(pResPages[i]); - } - - tfree(pResPages); + tfree(add); } -static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) { - SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel; - assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1); - - // copy to previous temp buffer - for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) { - SSchema *pSchema = getColumnModelSchema(pColumnModel, i); - int16_t offset = getColumnModelOffset(pColumnModel, i); - - memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes); +bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) { + int32_t ret = 0; + size_t size = taosArrayGetSize(columnIndexList); + if (size > 0) { + ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC); } - tmpBuffer->num = 0; - pLocalMerge->hasPrevRow = true; + // if ret == 0, means the result belongs to the same group + return (ret == 0); } -static void doExecuteFinalMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) { - // the tag columns need to be set before all functions execution - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - - size_t size = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t j = 0; j < size; ++j) { - SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j]; - - // tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer - int32_t functionId = pCtx->functionId; - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) { - tVariantDestroy(&pCtx->tag); - char* input = pCtx->pInput; - - if (pCtx->inputType == TSDB_DATA_TYPE_BINARY || pCtx->inputType == TSDB_DATA_TYPE_NCHAR) { - assert(varDataLen(input) <= pCtx->inputBytes); - tVariantCreateFromBinary(&pCtx->tag, varDataVal(input), varDataLen(input), pCtx->inputType); - } else { - tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType); - } - - } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j); - pCtx->param[0].i64 = pExpr->param[0].i64; - } - - pCtx->currentStage = MERGE_STAGE; +static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) { + return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted); +} - if (needInit) { - aAggs[pCtx->functionId].init(pCtx); - } +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) { + SSqlRes *pRes = &pObj->res; + if (pRes->pLocalMerger != NULL) { + tscDestroyLocalMerger(pObj); } - for (int32_t j = 0; j < size; ++j) { - int32_t functionId = pLocalMerge->pCtx[j].functionId; - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { - continue; - } + pRes->qId = 1; // hack to pass the safety check in fetch_row function + pRes->numOfRows = 0; + pRes->row = 0; - aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]); - } -} + pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet + pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger)); -static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) { - if (pLocalMerge->hasUnprocessedRow) { - pLocalMerge->hasUnprocessedRow = false; - doExecuteFinalMerge(pCmd, pLocalMerge, true); - savePreviousRow(pLocalMerge, tmpBuffer); - } + /* + * we need one additional byte space + * the sprintf function needs one additional space 
to put '\0' at the end of string + */ + size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1; + pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize); + + pRes->pLocalMerger->pResultBuf->num = numOfRes; + pRes->data = pRes->pLocalMerger->pResultBuf->data; } -static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) { - int64_t maxOutput = 0; - - size_t size = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t j = 0; j < size; ++j) { - /* - * ts, tag, tagprj function can not decide the output number of current query - * the number of output result is decided by main output - */ - int32_t functionId = pCtx[j].functionId; - if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) { - continue; - } +int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { + int32_t maxRowSize = MAX(rowSize, finalRowSize); + char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize)); - SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]); - if (maxOutput < pResInfo->numOfRes) { - maxOutput = pResInfo->numOfRes; - } - } + size_t size = tscNumOfFields(pQueryInfo); + SArithmeticSupport arithSup = {0}; - return maxOutput; -} + // todo refactor + arithSup.offset = 0; + arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + arithSup.exprList = pQueryInfo->exprList; + arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); -/* - * in handling the top/bottom query, which produce more than one rows result, - * the tsdb_func_tags only fill the first row of results, the remain rows need to - * filled with the same result, which is the tags, specified in group by clause - * - */ -static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) { - int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer - size_t size = tscSqlExprNumOfExprs(pQueryInfo); - - for (int32_t k = 0; k < size; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); - if (maxBufSize < pExpr->resBytes && pExpr->functionId == TSDB_FUNC_TAG) { - maxBufSize = pExpr->resBytes; - } + for(int32_t k = 0; k < arithSup.numOfCols; ++k) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, k); + arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->base.offset); } - assert(maxBufSize >= 0); - - char *buf = malloc((size_t)maxBufSize); - for (int32_t k = 0; k < size; ++k) { - SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k]; - if (pCtx->functionId != TSDB_FUNC_TAG) { - continue; - } - - int32_t inc = numOfRes - 1; // tsdb_func_tag function only produce one row of result - memset(buf, 0, (size_t)maxBufSize); - memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes); + int32_t offset = 0; - char* next = pCtx->pOutput; - for (int32_t i = 0; i < inc; ++i) { - next += pCtx->outputBytes; - memcpy(next, buf, (size_t)pCtx->outputBytes); + for (int i = 0; i < size; ++i) { + SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + // calculate the result from several other columns + if (pSup->pExpr->pExpr != NULL) { + arithSup.pExprInfo = pSup->pExpr; + arithmeticTreeTraverse(arithSup.pExprInfo->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc); + } else { + SExprInfo* pExpr = pSup->pExpr; + memcpy(pbuf + pOutput->num * offset, pExpr->base.offset * pOutput->num + pOutput->data, (size_t)(pExpr->base.resBytes * pOutput->num)); } - } - - free(buf); -} -int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger 
*pLocalMerge) { - size_t size = tscSqlExprNumOfExprs(pQueryInfo); - - for (int32_t k = 0; k < size; ++k) { - SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k]; - aAggs[pCtx->functionId].xFinalize(pCtx); + offset += pSup->field.bytes; } - pLocalMerge->hasPrevRow = false; + memcpy(pOutput->data, pbuf, (size_t)(pOutput->num * offset)); - int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx); - pLocalMerge->pResultBuf->num += numOfRes; + tfree(pbuf); + tfree(arithSup.data); - fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge); - return numOfRes; + return offset; } -/* - * points merge: - * points are merged according to the sort info, which is tags columns and timestamp column. - * In case of points without either tags columns or timestamp, such as - * results generated by simple aggregation function, we merge them all into one points - * *Exception*: column projection query, required no merge procedure - */ -bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) { - int32_t ret = 0; // merge all result by default +#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ + (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes) - int16_t functionId = pLocalMerge->pCtx[0].functionId; +static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex, + int32_t maxRows) { + for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i); + char* p = pColInfo->pData + pBlock->info.rows * pColInfo->info.bytes; - // todo opt performance - if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0) && pQueryInfo->distinctTag == false)) { // column projection query - ret = 1; // disable merge procedure - } else { - tOrderDescriptor *pDesc = pLocalMerge->pDesc; - if (pDesc->orderInfo.numOfCols > 0) { - if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc - // todo refactor comparator - ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data); - } else { // desc - ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data); - } - } + char *src = COLMODEL_GET_VAL(buf, pModel, maxRows, rowIndex, i); + memmove(p, src, pColInfo->info.bytes); } - /* if ret == 0, means the result belongs to the same group */ - return (ret == 0); + pBlock->info.rows += 1; } -static bool reachGroupResultLimit(SQueryInfo *pQueryInfo, SSqlRes *pRes) { - return (pRes->numOfGroups >= pQueryInfo->slimit.limit && pQueryInfo->slimit.limit >= 0); -} - -static bool saveGroupResultInfo(SSqlObj *pSql) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - - if (pRes->numOfRowsGroup > 0) { - pRes->numOfGroups += 1; +SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; } - // the output group is limited by the slimit clause - if (reachGroupResultLimit(pQueryInfo, pRes)) { - return true; - } - - // pRes->pGroupRec = realloc(pRes->pGroupRec, pRes->numOfGroups*sizeof(SResRec)); - // pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows; - // pRes->pGroupRec[pRes->numOfGroups-1].numOfClauseTotal = pRes->numOfClauseTotal; - - return false; -} + SMultiwayMergeInfo *pInfo = pOperator->info; + 
SLocalMerger *pMerger = pInfo->pMerge; + SLoserTreeInfo *pTree = pMerger->pLoserTree; + SColumnModel *pModel = pMerger->pDesc->pColumnModel; + tFilePage *tmpBuffer = pMerger->pTempBuffer; -bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) { - bool qualified = false; - - for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) { - __filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k); - SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]}; - - bool isnull = isNull(input, type); - if (isnull) { - if (fp == isNullOperator) { - qualified = true; - break; - } else { - continue; - } - } else { - if (fp == notNullOperator) { - qualified = true; - break; - } else if (fp == isNullOperator) { - continue; - } - } + pInfo->binfo.pRes->info.rows = 0; - if (fp(&filterElem, input, input, type)) { - qualified = true; + while(1) { + if (isAllSourcesCompleted(pMerger)) { break; } - } - - *notSkipped = qualified; - - return TSDB_CODE_SUCCESS; -} +#ifdef _DEBUG_VIEW + printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index); +#endif -int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) { - *notSkipped = true; - - if (pQueryInfo->havingFieldNum <= 0) { - return TSDB_CODE_SUCCESS; - } + assert((pTree->pNode[0].index < pMerger->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0); - //int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + // chosen from loser tree + SLocalDataSource *pOneDataSrc = pMerger->pLocalDataSrc[pTree->pNode[0].index]; + bool sameGroup = true; + if (pInfo->hasPrev) { + int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList); + + // if this row belongs to current result set group + for (int32_t i = 0; i < numOfCols; ++i) { + SColIndex * pIndex = taosArrayGet(pInfo->orderColumnList, i); + SColumnInfoData *pColInfo = taosArrayGet(pInfo->binfo.pRes->pDataBlock, pIndex->colIndex); + + char *newRow = + COLMODEL_GET_VAL(pOneDataSrc->filePage.data, pModel, pOneDataSrc->pMemBuffer->pColumnModel->capacity, + pOneDataSrc->rowIdx, pIndex->colIndex); + + char * data = pInfo->prevRow[i]; + int32_t ret = columnValueAscendingComparator(data, newRow, pColInfo->info.type, pColInfo->info.bytes); + if (ret == 0) { + continue; + } else { + sameGroup = false; + *newgroup = true; + break; + } + } + } - size_t numOfOutput = tscNumOfFields(pQueryInfo); - for(int32_t i = 0; i < numOfOutput; ++i) { - SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); - SExprFilter* pFieldFilters = pInterField->pFieldFilters; + if (!sameGroup || !pInfo->hasPrev) { //save the data + int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList); - if (pFieldFilters == NULL) { - continue; - } + for (int32_t i = 0; i < numOfCols; ++i) { + SColIndex * pIndex = taosArrayGet(pInfo->orderColumnList, i); + SColumnInfoData *pColInfo = taosArrayGet(pInfo->binfo.pRes->pDataBlock, pIndex->colIndex); - int32_t type = pInterField->field.type; + char *curCol = + COLMODEL_GET_VAL(pOneDataSrc->filePage.data, pModel, pOneDataSrc->pMemBuffer->pColumnModel->capacity, + pOneDataSrc->rowIdx, pIndex->colIndex); + memcpy(pInfo->prevRow[i], curCol, pColInfo->info.bytes); + } - char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset; - - doFilterFieldData(pInput, pFieldFilters, type, notSkipped); - if (*notSkipped == false) { - return TSDB_CODE_SUCCESS; + pInfo->hasPrev = true; } - } - - return TSDB_CODE_SUCCESS; -} + if 
(!sameGroup && pInfo->binfo.pRes->info.rows > 0) { + return pInfo->binfo.pRes; + } + appendOneRowToDataBlock(pInfo->binfo.pRes, pOneDataSrc->filePage.data, pModel, pOneDataSrc->rowIdx, pOneDataSrc->pMemBuffer->pColumnModel->capacity); -/** - * - * @param pSql - * @param pLocalMerge - * @param noMoreCurrentGroupRes - * @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups - */ -bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; +#if defined(_DEBUG_VIEW) + printf("chosen row:\t"); + SSrcColumnInfo colInfo[256] = {0}; + tscGetSrcColumnInfo(colInfo, pQueryInfo); - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - tFilePage * pResBuf = pLocalMerge->pResultBuf; - SColumnModel *pModel = pLocalMerge->resColModel; + tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo); +#endif - pRes->code = TSDB_CODE_SUCCESS; + pOneDataSrc->rowIdx += 1; + adjustLoserTreeFromNewData(pMerger, pOneDataSrc, pTree); - /* - * Ignore the output of the current group since this group is skipped by user - * We set the numOfRows to be 0 and discard the possible remain results. - */ - if (pQueryInfo->slimit.offset > 0) { - pRes->numOfRows = 0; - pQueryInfo->slimit.offset -= 1; - pLocalMerge->discard = !noMoreCurrentGroupRes; - - if (pLocalMerge->discard) { - SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel; - tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1); + if (pInfo->binfo.pRes->info.rows >= pInfo->bufCapacity) { + return pInfo->binfo.pRes; } - - return false; } - tColModelCompact(pModel, pResBuf, pModel->capacity); - - if (tscIsSecondStageQuery(pQueryInfo)) { - doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize); - } - - bool notSkipped = true; - - doHavingFilter(pQueryInfo, pResBuf, ¬Skipped); - - if (!notSkipped) { - pRes->numOfRows = 0; - pLocalMerge->discard = !noMoreCurrentGroupRes; + pOperator->status = OP_EXEC_DONE; + return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL; +} - if (pLocalMerge->discard) { - SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel; - tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1); - } - - return notSkipped; - } +static bool isSameGroupRv(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) { + int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList); + for (int32_t i = 0; i < numOfCols; ++i) { + SColIndex *pIndex = taosArrayGet(orderColumnList, i); - // no interval query, no fill operation - if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { - genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo); - } else { - SFillInfo* pFillInfo = pLocalMerge->pFillInfo; - if (pFillInfo != NULL) { - TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? 
pQueryInfo->window.ekey: pQueryInfo->window.skey; + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex); + assert(pIndex->colId == pColInfo->info.colId); - taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, ekey); - taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf); + char *data = dataCols[i]; + int32_t ret = columnValueAscendingComparator(data, pColInfo->pData, pColInfo->info.type, pColInfo->info.bytes); + if (ret == 0) { + continue; + } else { + return false; } - - doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes); } return true; } -void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning - size_t t = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t i = 0; i < t; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - pLocalMerge->pCtx[i].pOutput = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity; - - if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) { - pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].pOutput; - } +SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; } - memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage)); -} - -static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) { - // In handling data in other groups, we need to reset the interpolation information for a new group data - pRes->numOfRows = 0; - pRes->numOfRowsGroup = 0; - - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - - pQueryInfo->limit.offset = pLocalMerge->offset; - - STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); - STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - - // for group result interpolation, do not return if not data is generated - if (pQueryInfo->fillType != TSDB_FILL_NONE) { - TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey); - int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision); - taosResetFillInfo(pLocalMerge->pFillInfo, newTime); - } -} + SMultiwayMergeInfo *pAggInfo = pOperator->info; + SOperatorInfo *upstream = pOperator->upstream; -static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) { - return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted); -} + *newgroup = false; + bool handleData = false; + pAggInfo->binfo.pRes->info.rows = 0; -static bool doBuildFilledResultForGroup(SSqlObj *pSql) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; + { + if (pAggInfo->hasDataBlockForNewGroup) { + pAggInfo->binfo.pRes->info.rows = 0; + pAggInfo->hasPrev = false; // now we start from a new group data set. 
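/*
 * Illustrative sketch (not part of this patch): the merge operators added here
 * detect group boundaries by saving the previous row's group/order column
 * values and comparing each incoming row against them, as savePrevOrderColumns()
 * and isSameGroupRv() above do. The standalone example below shows the same
 * save-then-compare pattern with simplified stand-in types (SimpleCol instead of
 * SColumnInfoData) and a plain memcmp instead of the type-aware
 * columnValueAscendingComparator().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct SimpleCol {
  int32_t bytes;  // fixed width of one value in this column
  char   *data;   // column-major buffer holding rows * bytes
} SimpleCol;

// copy the group columns of row `rowIndex` into prevRow[] and mark that a previous row exists
static void saveGroupCols(char **prevRow, SimpleCol *cols, int32_t numOfCols, int32_t rowIndex, bool *hasPrev) {
  for (int32_t i = 0; i < numOfCols; ++i) {
    memcpy(prevRow[i], cols[i].data + cols[i].bytes * rowIndex, cols[i].bytes);
  }
  *hasPrev = true;
}

// return true if row `rowIndex` carries the same group-column values as the saved previous row
static bool sameGroupAsPrev(char **prevRow, SimpleCol *cols, int32_t numOfCols, int32_t rowIndex) {
  for (int32_t i = 0; i < numOfCols; ++i) {
    if (memcmp(prevRow[i], cols[i].data + cols[i].bytes * rowIndex, cols[i].bytes) != 0) {
      return false;
    }
  }
  return true;
}

int main() {
  int32_t groupKeys[] = {1, 1, 2, 2, 2, 3};  // one INT group column, six rows
  SimpleCol col = {.bytes = sizeof(int32_t), .data = (char *)groupKeys};

  char *prevRow[1] = {malloc(col.bytes)};
  bool  hasPrev = false;

  for (int32_t r = 0; r < 6; ++r) {
    if (!hasPrev || !sameGroupAsPrev(prevRow, &col, 1, r)) {
      printf("row %d starts a new group (key=%d)\n", r, groupKeys[r]);
    }
    saveGroupCols(prevRow, &col, 1, r, &hasPrev);
  }

  free(prevRow[0]);
  return 0;
}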
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - SLocalMerger *pLocalMerge = pRes->pLocalMerger; - SFillInfo *pFillInfo = pLocalMerge->pFillInfo; + // not belongs to the same group, return the result of current group; + setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pAggInfo->pExistBlock, TSDB_ORDER_ASC); + updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pAggInfo->pExistBlock->info.rows); - if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) { - assert(pQueryInfo->fillType != TSDB_FILL_NONE); + { // reset output buffer + for(int32_t j = 0; j < pOperator->numOfOutput; ++j) { + aAggs[pAggInfo->binfo.pCtx[j].functionId].init(&pAggInfo->binfo.pCtx[j]); + } + } - tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf; - int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1)); + doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock); - // the first column must be the timestamp column - int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity); - if (rows > 0) { // do fill gap - doFillResult(pSql, pLocalMerge, false); + savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0, + &pAggInfo->hasGroupColData); + pAggInfo->pExistBlock = NULL; + pAggInfo->hasDataBlockForNewGroup = false; + handleData = true; + *newgroup = true; } - - return true; - } else { - return false; } -} - -static bool doHandleLastRemainData(SSqlObj *pSql) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - SLocalMerger *pLocalMerge = pRes->pLocalMerger; - SFillInfo *pFillInfo = pLocalMerge->pFillInfo; + SSDataBlock* pBlock = NULL; + while(1) { + bool prev = *newgroup; + pBlock = upstream->exec(upstream, newgroup); + if (pBlock == NULL) { + *newgroup = prev; + break; + } - bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow; + if (pAggInfo->hasGroupColData) { + bool sameGroup = isSameGroupRv(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData); + if (!sameGroup) { + *newgroup = true; + pAggInfo->hasDataBlockForNewGroup = true; + pAggInfo->pExistBlock = pBlock; + savePrevOrderColumns(pAggInfo->prevRow, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasPrev); + break; + } + } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + // not belongs to the same group, return the result of current group + setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC); + updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor); - if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL || - prevGroupCompleted) { - // if fillType == TSDB_FILL_NONE, return directly - if (pQueryInfo->fillType != TSDB_FILL_NONE && - ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { - int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? 
pQueryInfo->window.ekey : pQueryInfo->window.skey; + doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pBlock); + savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData); + handleData = true; + } - int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity); - if (rows > 0) { - doFillResult(pSql, pLocalMerge, true); + if (handleData) { // data in current group is all handled + for(int32_t j = 0; j < pOperator->numOfOutput; ++j) { + int32_t functionId = pAggInfo->binfo.pCtx[j].functionId; + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + continue; } - } - /* - * 1. numOfRows == 0, means no interpolation results are generated. - * 2. if all local data sources are consumed, and no un-processed rows exist. - * - * No results will be generated and query completed. - */ - if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) { - return true; + aAggs[functionId].xFinalize(&pAggInfo->binfo.pCtx[j]); } - // start to process result for a new group and save the result info of previous group - if (saveGroupResultInfo(pSql)) { - return true; - } + int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pAggInfo->binfo.pCtx, pOperator->numOfOutput); + pAggInfo->binfo.pRes->info.rows += numOfRows; - resetEnvForNewResultset(pRes, pCmd, pLocalMerge); + setTagValueForMultipleRows(pAggInfo->binfo.pCtx, pOperator->numOfOutput, numOfRows); } - return false; -} + SSDataBlock* pRes = pAggInfo->binfo.pRes; + { + SColumnInfoData* pInfoData = taosArrayGet(pRes->pDataBlock, 0); -static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - - SLocalMerger *pLocalMerge = pRes->pLocalMerger; - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - size_t size = tscSqlExprNumOfExprs(pQueryInfo); + if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pRes->info.rows > 0) { + STimeWindow* w = &pRes->info.window; - for (int32_t k = 0; k < size; ++k) { - SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k]; - pCtx->pOutput += pCtx->outputBytes * numOfRes; + // TODO in case of desc order, swap it + w->skey = *(int64_t*)pInfoData->pData; + w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pRes->info.rows - 1)); - // set the correct output timestamp column position - if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) { - pCtx->ptsOutputBuf = ((char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * numOfRes); + if (pOperator->pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_DESC) { + SWAP(w->skey, w->ekey, TSKEY); + assert(w->skey <= w->ekey); + } } } - doExecuteFinalMerge(pCmd, pLocalMerge, true); + return (pRes->info.rows != 0)? 
pRes:NULL; } -int32_t tscDoLocalMerge(SSqlObj *pSql) { - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - - tscResetForNextRetrieve(pRes); +static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) { + SSLimitOperatorInfo *pInfo = pOperator->info; + assert(pInfo->currentGroupOffset >= 0); - if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed - if (pRes->code == TSDB_CODE_SUCCESS) { - return pRes->code; + SSDataBlock* pBlock = NULL; + if (pInfo->currentGroupOffset == 0) { + pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; } - tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code)); - return pRes->code; - } - - SLocalMerger *pLocalMerge = pRes->pLocalMerger; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - tFilePage *tmpBuffer = pLocalMerge->pTempBuffer; - - int32_t remain = 1; - if (tscIsTopbotQuery(pQueryInfo)) { - remain = tscGetTopbotQueryParam(pQueryInfo); - } - - if (doHandleLastRemainData(pSql)) { - return TSDB_CODE_SUCCESS; - } + if (*newgroup == false && pInfo->limit.limit > 0 && pInfo->rowsTotal >= pInfo->limit.limit) { + while ((*newgroup) == false) { // ignore the remain blocks + pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; + } + } + } - if (doBuildFilledResultForGroup(pSql)) { - return TSDB_CODE_SUCCESS; + return pBlock; } - SLoserTreeInfo *pTree = pLocalMerge->pLoserTree; - - // clear buffer - handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer); - SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel; - - while (1) { - if (isAllSourcesCompleted(pLocalMerge)) { - break; + pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; } -#ifdef _DEBUG_VIEW - printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index); -#endif - assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0); - - // chosen from loser tree - SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index]; - - tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1, - pOneDataSrc->pMemBuffer->pColumnModel->capacity); - -#if defined(_DEBUG_VIEW) - printf("chosen row:\t"); - SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, pQueryInfo); - - tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo); -#endif - - if (pLocalMerge->discard) { - assert(pLocalMerge->hasUnprocessedRow == false); - - /* current record belongs to the same group of previous record, need to discard it */ - if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) { - tmpBuffer->num = 0; - pOneDataSrc->rowIdx += 1; - - adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree); - - // all inputs are exhausted, abort current process - if (isAllSourcesCompleted(pLocalMerge)) { - break; - } - - // data belongs to the same group needs to be discarded - continue; - } else { - pLocalMerge->discard = false; - pLocalMerge->discardData->num = 0; + while(1) { + if (*newgroup) { + pInfo->currentGroupOffset 
-= 1; + *newgroup = false; + } - if (saveGroupResultInfo(pSql)) { - return TSDB_CODE_SUCCESS; + while ((*newgroup) == false) { + pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; } - - resetEnvForNewResultset(pRes, pCmd, pLocalMerge); } - } - - if (pLocalMerge->hasPrevRow) { - if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) { - // belong to the group of the previous row, continue process it - doExecuteFinalMerge(pCmd, pLocalMerge, false); - - // copy to buffer - savePreviousRow(pLocalMerge, tmpBuffer); - } else { - /* - * current row does not belong to the group of previous row. - * so the processing of previous group is completed. - */ - int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge); - bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer); - - tFilePage *pResBuf = pLocalMerge->pResultBuf; - - /* - * if the previous group does NOT generate any result (pResBuf->num == 0), - * continue to process results instead of return results. - */ - if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num + remain >= pLocalMerge->resColModel->capacity)) { - // does not belong to the same group - bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup); - - // this row needs to discard, since it belongs to the group of previous - if (pLocalMerge->discard && sameGroup) { - pLocalMerge->hasUnprocessedRow = false; - tmpBuffer->num = 0; - } else { // current row does not belongs to the previous group, so it is not be handled yet. - pLocalMerge->hasUnprocessedRow = true; - } - - resetOutputBuf(pQueryInfo, pLocalMerge); - pOneDataSrc->rowIdx += 1; - - // here we do not check the return value - adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree); - - if (pRes->numOfRows == 0) { - handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer); - - if (!sameGroup) { - /* - * previous group is done, prepare for the next group - * If previous group is not skipped, keep it in pRes->numOfGroups - */ - if (notSkipped && saveGroupResultInfo(pSql)) { - return TSDB_CODE_SUCCESS; - } - - resetEnvForNewResultset(pRes, pCmd, pLocalMerge); - } - } else { - /* - * if next record belongs to a new group, we do not handle this record here. - * We start the process in a new round. - */ - if (sameGroup) { - handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer); - } - } - // current group has no result, - if (pRes->numOfRows == 0) { - continue; - } else { - return TSDB_CODE_SUCCESS; - } - } else { // result buffer is not full - doProcessResultInNextWindow(pSql, numOfRes); - savePreviousRow(pLocalMerge, tmpBuffer); - } + // now we have got the first data block of the next group. 
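/*
 * Illustrative sketch (not part of this patch): the skipGroupBlock() logic being
 * added here implements the SLIMIT group offset by pulling blocks from the
 * upstream operator and decrementing currentGroupOffset each time the upstream
 * reports a new group, until the offset reaches zero. The toy function below
 * replaces the operator pipeline with an array of per-block group ids but keeps
 * the same control flow; a change of id plays the role of the *newgroup flag.
 */
#include <stdint.h>
#include <stdio.h>

// return the index of the first block left after skipping `groupOffset` whole groups,
// or -1 if the input is exhausted before the offset is consumed
static int32_t skipGroups(const int32_t *groupId, int32_t numOfBlocks, int32_t groupOffset) {
  if (numOfBlocks == 0) {
    return -1;
  }

  int32_t i = 0;
  while (groupOffset > 0) {
    int32_t cur = groupId[i];
    while (i < numOfBlocks && groupId[i] == cur) {  // consume every block of the current group
      ++i;
    }

    if (i >= numOfBlocks) {
      return -1;  // no more groups, offset not fully consumed
    }
    groupOffset -= 1;  // a new group has arrived
  }

  return i;
}

int main() {
  int32_t groupId[] = {0, 0, 1, 1, 2, 3, 3};
  printf("first block after skipping 2 groups: %d\n", skipGroups(groupId, 7, 2));  // prints 4
  return 0;
}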
+ if (pInfo->currentGroupOffset == 0) { + return pBlock; } - } else { - doExecuteFinalMerge(pCmd, pLocalMerge, true); - savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer } - pOneDataSrc->rowIdx += 1; - adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree); - } + return NULL; +} - if (pLocalMerge->hasPrevRow) { - finalizeRes(pQueryInfo, pLocalMerge); +SSDataBlock* doSLimit(void* param, bool* newgroup) { + SOperatorInfo *pOperator = (SOperatorInfo *)param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; } - if (pLocalMerge->pResultBuf->num) { - genFinalResults(pSql, pLocalMerge, true); - } + SSLimitOperatorInfo *pInfo = pOperator->info; - return TSDB_CODE_SUCCESS; -} + SSDataBlock *pBlock = NULL; + while (1) { + pBlock = skipGroupBlock(pOperator, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; + } -void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) { - SSqlRes *pRes = &pObj->res; - if (pRes->pLocalMerger != NULL) { - tscDestroyLocalMerger(pObj); - } + if (*newgroup) { // a new group arrives + pInfo->groupTotal += 1; + pInfo->rowsTotal = 0; + pInfo->currentOffset = pInfo->limit.offset; + } - pRes->qId = 1; // hack to pass the safety check in fetch_row function - pRes->numOfRows = 0; - pRes->row = 0; + assert(pInfo->currentGroupOffset == 0); - pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet - pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger)); + if (pInfo->currentOffset >= pBlock->info.rows) { + pInfo->currentOffset -= pBlock->info.rows; + } else { + if (pInfo->currentOffset == 0) { + break; + } - /* - * we need one additional byte space - * the sprintf function needs one additional space to put '\0' at the end of string - */ - size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1; - pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize); + int32_t remain = (int32_t)(pBlock->info.rows - pInfo->currentOffset); + pBlock->info.rows = remain; - pRes->pLocalMerger->pResultBuf->num = numOfRes; - pRes->data = pRes->pLocalMerger->pResultBuf->data; -} + // move the remain rows of this data block to the front. 
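/*
 * Illustrative sketch (not part of this patch): the memmove loop that follows
 * works because every column of a block stores its fixed-width values
 * contiguously (column-major), so applying a row offset is just a per-column
 * shift of the remaining values. The toy example below uses a plain int32_t
 * array in place of SColumnInfoData.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// drop the first `offset` rows of a fixed-width, contiguously stored column and return the new row count
static int32_t dropLeadingRows(char *colData, int32_t bytes, int32_t rows, int32_t offset) {
  int32_t remain = rows - offset;
  memmove(colData, colData + (size_t)bytes * offset, (size_t)remain * bytes);
  return remain;
}

int main() {
  int32_t vals[] = {10, 20, 30, 40, 50};
  int32_t rows = dropLeadingRows((char *)vals, sizeof(int32_t), 5, 2);

  for (int32_t i = 0; i < rows; ++i) {
    printf("%d ", vals[i]);  // prints: 30 40 50
  }
  printf("\n");
  return 0;
}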
+ for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i); -int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { - int32_t maxRowSize = MAX(rowSize, finalRowSize); - char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize)); - - size_t size = tscNumOfFields(pQueryInfo); - SArithmeticSupport arithSup = {0}; + int16_t bytes = pColInfoData->info.bytes; + memmove(pColInfoData->pData, pColInfoData->pData + bytes * pInfo->currentOffset, remain * bytes); + } - // todo refactor - arithSup.offset = 0; - arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); - arithSup.exprList = pQueryInfo->exprList; - arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); + pInfo->currentOffset = 0; + break; + } + } - for(int32_t k = 0; k < arithSup.numOfCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); - arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset); + if (pInfo->slimit.limit > 0 && pInfo->groupTotal > pInfo->slimit.limit) { // reach the group limit, abort + return NULL; } - int32_t offset = 0; + if (pInfo->limit.limit > 0 && (pInfo->rowsTotal + pBlock->info.rows >= pInfo->limit.limit)) { + pBlock->info.rows = (int32_t)(pInfo->limit.limit - pInfo->rowsTotal); + pInfo->rowsTotal = pInfo->limit.limit; - for (int i = 0; i < size; ++i) { - SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); - - // calculate the result from several other columns - if (pSup->pArithExprInfo != NULL) { - arithSup.pArithExpr = pSup->pArithExprInfo; - arithmeticTreeTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc); - } else { - SSqlExpr* pExpr = pSup->pSqlExpr; - memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, (size_t)(pExpr->resBytes * pOutput->num)); + if (pInfo->slimit.limit > 0 && pInfo->groupTotal >= pInfo->slimit.limit) { + pOperator->status = OP_EXEC_DONE; } - offset += pSup->field.bytes; + // setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + } else { + pInfo->rowsTotal += pBlock->info.rows; } - memcpy(pOutput->data, pbuf, (size_t)(pOutput->num * offset)); - - tfree(pbuf); - tfree(arithSup.data); - - return offset; + return pBlock; } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 920937928fae775305e0b4e695092bc6d458ff6d..6b88c9074738204d99f2dc536100a68a1656ea77 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -748,7 +748,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC const int32_t STABLE_INDEX = 1; SSqlCmd * pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); char *sql = *sqlstr; @@ -829,6 +829,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC index = 0; sToken = tStrGetToken(sql, &index, false); if (sToken.type != TK_TAGS && sToken.type != TK_LP) { + tscDestroyBoundColumnInfo(&spd); return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); } @@ -841,6 +842,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC char* end = NULL; code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end); if (code != TSDB_CODE_SUCCESS) { + tscDestroyBoundColumnInfo(&spd); return code; } @@ -858,11 +860,13 @@ static int32_t 
tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC sql += index; if (sToken.type != TK_LP) { + tscDestroyBoundColumnInfo(&spd); return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z); } SKVRowBuilder kvRowBuilder = {0}; if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { + tscDestroyBoundColumnInfo(&spd); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -875,6 +879,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC if (TK_ILLEGAL == sToken.type) { tdDestroyKVRowBuilder(&kvRowBuilder); + tscDestroyBoundColumnInfo(&spd); return TSDB_CODE_TSC_INVALID_SQL; } @@ -892,6 +897,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision); if (code != TSDB_CODE_SUCCESS) { tdDestroyKVRowBuilder(&kvRowBuilder); + tscDestroyBoundColumnInfo(&spd); return code; } @@ -1065,7 +1071,7 @@ int tsParseInsertSql(SSqlObj *pSql) { int32_t totalNum = 0; int32_t code = TSDB_CODE_SUCCESS; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); assert(pQueryInfo != NULL); STableMetaInfo *pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo):tscGetMetaInfo(pQueryInfo, 0); @@ -1089,7 +1095,7 @@ int tsParseInsertSql(SSqlObj *pSql) { str = pCmd->curSql; } - tscDebug("%p create data block list hashList:%p", pSql, pCmd->pTableBlockHashList); + tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->pTableBlockHashList); while (1) { int32_t index = 0; @@ -1141,7 +1147,7 @@ int tsParseInsertSql(SSqlObj *pSql) { return code; } - tscError("%p async insert parse error, code:%s", pSql, tstrerror(code)); + tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code)); pCmd->curSql = NULL; goto _clean; } @@ -1285,7 +1291,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) { pCmd->count = 0; pCmd->command = TSDB_SQL_INSERT; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd, pCmd->clauseIndex); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType); @@ -1303,7 +1309,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) { SSqlCmd* pCmd = &pSql->cmd; if ((!pCmd->parseFinished) && (!initial)) { - tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql); + tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql); } ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); @@ -1375,7 +1381,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock return code; } - return tscProcessSql(pSql); + return tscBuildAndSendRequest(pSql, NULL); } typedef struct SImportFileSupport { @@ -1409,7 +1415,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow assert(pSql->res.numOfRows == 0); int32_t ret = fseek(fp, 0, SEEK_SET); if (ret < 0) { - tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno)); + tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno)); code = TAOS_SYSTEM_ERROR(errno); goto _error; } @@ -1521,6 +1527,7 @@ void tscImportDataFromFile(SSqlObj *pSql) { } assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && strlen(pCmd->payload) != 0); + pCmd->active = pCmd->pQueryInfo[0]; SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport)); SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, 
TSDB_SQL_INSERT, NULL); @@ -1529,7 +1536,7 @@ void tscImportDataFromFile(SSqlObj *pSql) { FILE *fp = fopen(pCmd->payload, "rb"); if (fp == NULL) { pSql->res.code = TAOS_SYSTEM_ERROR(errno); - tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code)); + tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code)); tfree(pSupporter); taos_free_result(pNew); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 4efaf7c2b516019052018c508306e4736322a284..c3c8986e2f7d07692fe2a8aaf3200637a6dc97fb 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -815,7 +815,7 @@ static int insertStmtExecute(STscStmt* stmt) { pRes->numOfRows = 0; pRes->numOfTotal = 0; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); // wait for the callback function to post the semaphore tsem_wait(&pSql->rspSem); diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 3b0e1b5775f13e619995ef8145ba4a09533f7a49..777a136a6e215110aaebb2ff93f97456dd215dcf 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -61,7 +61,7 @@ void tscAddIntoSqlList(SSqlObj *pSql) { pSql->stime = taosGetTimestampMs(); pSql->listed = 1; - tscDebug("%p added into sqlList", pSql); + tscDebug("0x%"PRIx64" added into sqlList", pSql->self); } void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) { @@ -99,12 +99,12 @@ void tscSaveSlowQuery(SSqlObj *pSql) { return; } - tscDebug("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); + tscDebug("0x%"PRIx64" query time:%" PRId64 " sql:%s", pSql->self, pSql->res.useconds, pSql->sqlstr); int32_t sqlSize = (int32_t)(TSDB_SLOW_QUERY_SQL_LEN + size); char *sql = malloc(sqlSize); if (sql == NULL) { - tscError("%p failed to allocate memory to sent slow query to dnode", pSql); + tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self); return; } @@ -141,7 +141,7 @@ void tscRemoveFromSqlList(SSqlObj *pSql) { pSql->listed = 0; tscSaveSlowQuery(pSql); - tscDebug("%p removed from sqlList", pSql); + tscDebug("0x%"PRIx64" removed from sqlList", pSql->self); } void tscKillQuery(STscObj *pObj, uint32_t killId) { @@ -158,7 +158,7 @@ void tscKillQuery(STscObj *pObj, uint32_t killId) { if (pSql == NULL) { tscError("failed to kill query, id:%d, it may have completed/terminated", killId); } else { - tscDebug("%p query is killed, queryId:%d", pSql, killId); + tscDebug("0x%"PRIx64" query is killed, queryId:%d", pSql->self, killId); taos_stop_query(pSql); } } @@ -213,7 +213,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { pthread_mutex_unlock(&pObj->mutex); if (pStream) { - tscDebug("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); + tscDebug("0x%"PRIx64" stream:%p is killed, streamId:%d", pStream->pSql->self, pStream, killId); if (pStream->callback) { pStream->callback(pStream->param); } @@ -249,8 +249,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { pQdesc->stime = htobe64(pSql->stime); pQdesc->queryId = htonl(pSql->queryId); //pQdesc->useconds = htobe64(pSql->res.useconds); - pQdesc->useconds = htobe64(now - pSql->stime); // use local time instead of sever rsp elapsed time - pQdesc->qHandle = htobe64(pSql->res.qId); + pQdesc->useconds = htobe64(now - pSql->stime); + pQdesc->qId = htobe64(pSql->res.qId); pHeartbeat->numOfQueries++; pQdesc++; @@ -273,7 +273,7 @@ int tscBuildQueryStreamDesc(void 
*pMsg, STscObj *pObj) { pSdesc->num = htobe64(pStream->num); pSdesc->useconds = htobe64(pStream->useconds); - pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval); + pSdesc->stime = (pStream->stime == INT64_MIN) ? htobe64(pStream->stime) : htobe64(pStream->stime - pStream->interval.interval); pSdesc->ctime = htobe64(pStream->ctime); pSdesc->slidingTime = htobe64(pStream->interval.sliding); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e8f753f876ea27d9d3d4e312d7a2de2d84e2f066..87b4669a04f98b3e1938f98684233b2285f429f8 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -57,7 +57,7 @@ typedef struct SConvertFunc { int32_t execFuncId; } SConvertFunc; -static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex); +static SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); static char* getAccountId(SSqlObj* pSql); @@ -74,27 +74,27 @@ static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, int32_t na static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult); static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, - int8_t type, char* fieldName, SSqlExpr* pSqlExpr); + int8_t type, char* fieldName, SExprInfo* pSqlExpr); static uint8_t convertOptr(SStrToken *pToken); -static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery); +static int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery, bool timeWindowQuery); static bool validateIpAddress(const char* ip, size_t size); static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool twQuery); -static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd); +static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd); -static int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode); +static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken); static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding); static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem); -static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql); -static int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySQL); -static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode, SSchema* pSchema); +static int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql); +static int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); +static int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema); static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); @@ -111,24 +111,25 @@ 
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo); static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo); -static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySqlNode* pQuerySqlNode, SSqlObj* pSql); +static int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SSqlNode* pSqlNode, SSqlObj* pSql); static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql); static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); -static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode); +static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate); -static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); +static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo); static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); -static int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index); -static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +static int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, int32_t index); +static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid); static bool validateDebugFlag(int32_t v); +static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static bool isTimeWindowQuery(SQueryInfo* pQueryInfo) { return pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0; @@ -258,7 +259,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->msg); } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfoS(pCmd, pCmd->clauseIndex); if (pQueryInfo == NULL) { pRes->code = terrno; return pRes->code; @@ -617,35 +618,38 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_SELECT: { const char* msg1 = "columns in select clause not identical"; - for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) { - SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i); - if (pqi == NULL) { + size_t size = taosArrayGetSize(pInfo->list); + for (int32_t i = pCmd->numOfClause; i < size; ++i) { + SQueryInfo* p = tscGetQueryInfoS(pCmd, i); + if (p == NULL) { pRes->code = terrno; return pRes->code; } } - assert(pCmd->numOfClause == pInfo->subclauseInfo.numOfClause); - for (int32_t i = pCmd->clauseIndex; i < pInfo->subclauseInfo.numOfClause; ++i) { - SQuerySqlNode* pQuerySqlNode = pInfo->subclauseInfo.pClause[i]; - tscTrace("%p start to parse %dth subclause, 
total:%d", pSql, i, pInfo->subclauseInfo.numOfClause); - if ((code = doValidateSqlNode(pSql, pQuerySqlNode, i)) != TSDB_CODE_SUCCESS) { + assert(pCmd->numOfClause == size); + for (int32_t i = pCmd->clauseIndex; i < size; ++i) { + SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i); + tscTrace("%p start to parse %dth subclause, total:%d", pSql, i, (int32_t) size); + if ((code = validateSqlNode(pSql, pSqlNode, i)) != TSDB_CODE_SUCCESS) { return code; } - tscPrintSelectClause(pSql, i); + tscPrintSelNodeList(pSql, i); pCmd->clauseIndex += 1; } // restore the clause index pCmd->clauseIndex = 0; + // set the command/global limit parameters from the first subclause to the sqlcmd object - SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0); pCmd->command = pQueryInfo1->command; // if there is only one element, the limit of clause is the limit of global result. + // validate the select node for "UNION ALL" subclause for (int32_t i = 1; i < pCmd->numOfClause; ++i) { - SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); + SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i); int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); if (ret != 0) { @@ -706,7 +710,7 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { return true; @@ -718,7 +722,7 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) { // need to add timestamp column in result set, if it is a time window query static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo) { - uint64_t uid = tscSqlExprGet(pQueryInfo, 0)->uid; + uint64_t uid = tscSqlExprGet(pQueryInfo, 0)->base.uid; int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { @@ -765,8 +769,8 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn */ size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } @@ -782,7 +786,7 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn return addPrimaryTsColumnForTimeWindowQuery(pQueryInfo); } -int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode) { +int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) { const char* msg2 = "interval cannot be less than 10 ms"; const char* msg3 = "sliding cannot be used without interval"; @@ -791,8 +795,8 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - if (!TPARSER_HAS_TOKEN(pQuerySqlNode->interval.interval)) { - if (TPARSER_HAS_TOKEN(pQuerySqlNode->sliding)) { + if (!TPARSER_HAS_TOKEN(pSqlNode->interval.interval)) { + if (TPARSER_HAS_TOKEN(pSqlNode->sliding)) { return 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -805,7 +809,7 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode } // interval is not null - SStrToken *t = &pQuerySqlNode->interval.interval; + SStrToken *t = &pSqlNode->interval.interval; if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -822,11 +826,11 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode } } - if (parseIntervalOffset(pCmd, pQueryInfo, &pQuerySqlNode->interval.offset) != TSDB_CODE_SUCCESS) { + if (parseIntervalOffset(pCmd, pQueryInfo, &pSqlNode->interval.offset) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - if (parseSlidingClause(pCmd, pQueryInfo, &pQuerySqlNode->sliding) != TSDB_CODE_SUCCESS) { + if (parseSlidingClause(pCmd, pQueryInfo, &pSqlNode->sliding) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -834,19 +838,19 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode return checkInvalidExprForTimeWindow(pCmd, pQueryInfo); } -int32_t parseSessionClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode * pQuerySqlNode) { +int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) { const char* msg1 = "gap should be fixed time window"; const char* msg2 = "only one type time window allowed"; const char* msg3 = "invalid column name"; const char* msg4 = "invalid time window"; // no session window - if (!TPARSER_HAS_TOKEN(pQuerySqlNode->sessionVal.gap)) { + if (!TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)) { return TSDB_CODE_SUCCESS; } - SStrToken* col = &pQuerySqlNode->sessionVal.col; - SStrToken* gap = &pQuerySqlNode->sessionVal.gap; + SStrToken* col = &pSqlNode->sessionVal.col; + SStrToken* gap = &pSqlNode->sessionVal.gap; char timeUnit = 0; if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit) != TSDB_CODE_SUCCESS) { @@ -988,7 +992,7 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam if (acctId == NULL || strlen(acctId) <= 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - + code = tNameSetAcctId(&pTableMetaInfo->name, acctId); if (code != 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); @@ -1398,20 +1402,17 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr return (totalLen < TSDB_TABLE_FNAME_LEN) ? 
TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; } -void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { - SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscColumnListInsert(pQueryInfo->colList, &tsCol); +void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t tableUid) { + SSchema s = {.type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX}; + tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s); } -static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSqlExprItem* pItem) { +static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem) { const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables"; const char* msg2 = "invalid arithmetic expression in select clause"; const char* msg3 = "tag columns can not be used in arithmetic expression"; const char* msg4 = "columns from different table mixed up in arithmetic expression"; - // arithmetic function in select clause - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); - SColumnList columnList = {0}; int32_t arithmeticType = NON_ARITHMEIC_EXPR; @@ -1433,12 +1434,12 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t // expr string is set as the parameter of function SColumnIndex index = {.tableIndex = tableIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), + SExprInfo* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), getNewResColId(pQueryInfo), sizeof(double), false); char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:pItem->pNode->token.z; - size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1); - tstrncpy(pExpr->aliasName, name, len); + size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->token.n + 1); + tstrncpy(pExpr->base.aliasName, name, len); tExprNode* pNode = NULL; SArray* colList = taosArrayInit(10, sizeof(SColIndex)); @@ -1476,11 +1477,11 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t char* c = tbufGetData(&bw, false); // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); - insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); + addExprParams(&pExpr->base, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); + insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->base.aliasName, pExpr); // add ts column - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); tbufCloseWriter(&bw); taosArrayDestroy(colList); @@ -1502,41 +1503,41 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t int32_t slot = tscNumOfFields(pQueryInfo) - 1; SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); - if (pInfo->pSqlExpr == NULL) { - SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); + if (pInfo->pExpr == NULL) { + SExprInfo* pExprInfo = calloc(1, sizeof(SExprInfo)); // arithmetic expression always return result in the format of double float - pArithExprInfo->bytes = sizeof(double); - pArithExprInfo->interBytes = sizeof(double); - pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + pExprInfo->base.resBytes = sizeof(double); + pExprInfo->base.interBytes = sizeof(double); + pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE; - pArithExprInfo->base.functionId = TSDB_FUNC_ARITHM; - pArithExprInfo->base.numOfParams = 1; - pArithExprInfo->base.resColId = getNewResColId(pQueryInfo); + pExprInfo->base.functionId = TSDB_FUNC_ARITHM; + pExprInfo->base.numOfParams = 1; + pExprInfo->base.resColId = getNewResColId(pQueryInfo); - int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &(pExprInfo->base.uid)); if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(pArithExprInfo->pExpr, NULL); + tExprTreeDestroy(pExprInfo->pExpr, NULL); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); } - pInfo->pArithExprInfo = pArithExprInfo; + pInfo->pExpr = pExprInfo; } SBufferWriter bw = tbufInitWriter(NULL, false); TRY(0) { - exprTreeToBinary(&bw, pInfo->pArithExprInfo->pExpr); + exprTreeToBinary(&bw, pInfo->pExpr->pExpr); } CATCH(code) { tbufCloseWriter(&bw); UNUSED(code); // TODO: other error handling } END_TRY - SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; - pFuncMsg->arg[0].argBytes = (int16_t) tbufTell(&bw); - pFuncMsg->arg[0].argValue.pz = tbufGetData(&bw, true); - pFuncMsg->arg[0].argType = TSDB_DATA_TYPE_BINARY; + SSqlExpr* pSqlExpr = &pInfo->pExpr->base; + pSqlExpr->param[0].nLen = (int16_t) tbufTell(&bw); + pSqlExpr->param[0].pz = tbufGetData(&bw, true); + pSqlExpr->param[0].nType = TSDB_DATA_TYPE_BINARY; // tbufCloseWriter(&bw); // TODO there is a memory leak } @@ -1545,7 +1546,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t 
clauseIndex, int32_t } static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSqlExprItem* pItem) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); + SExprInfo* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -1553,7 +1554,7 @@ static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumn SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName; - tstrncpy(pExpr->aliasName, colName, sizeof(pExpr->aliasName)); + tstrncpy(pExpr->base.aliasName, colName, sizeof(pExpr->base.aliasName)); SColumnList ids = {0}; ids.num = 1; @@ -1564,15 +1565,15 @@ static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumn ids.num = 0; } - insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, (int8_t)pExpr->resType, pExpr->aliasName, pExpr); + insertResultField(pQueryInfo, startPos, &ids, pExpr->base.resBytes, (int8_t)pExpr->base.resType, pExpr->base.aliasName, pExpr); } static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) { // primary timestamp column has been added already size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { return; } } @@ -1606,25 +1607,30 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) { return false; } -int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) { - assert(pSelectList != NULL && pCmd != NULL); +int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery, + bool timeWindowQuery) { + assert(pSelNodeList != NULL && pCmd != NULL); + const char* msg1 = "too many items in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; + const char* msg4 = "only support distinct one tag"; const char* msg5 = "invalid function name"; - const char* msg6 = "only support distinct one tag"; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + // too many result columns not support order by in query + if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } if (pQueryInfo->colList == NULL) { pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } bool hasDistinct = false; - size_t numOfExpr = taosArrayGetSize(pSelectList); + size_t numOfExpr = taosArrayGetSize(pSelNodeList); for (int32_t i = 0; i < numOfExpr; ++i) { int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - tSqlExprItem* pItem = taosArrayGet(pSelectList, i); + tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i); if (hasDistinct == false) { hasDistinct = (pItem->distinct == true); @@ -1648,7 +1654,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis return TSDB_CODE_TSC_INVALID_SQL; } } else if (type == SQL_NODE_EXPR) { - int32_t code = 
handleArithmeticExpr(pCmd, clauseIndex, i, pItem); + int32_t code = handleArithmeticExpr(pCmd, pQueryInfo, i, pItem); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -1657,13 +1663,13 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis } if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } if (hasDistinct == true) { if (!isValidDistinctSql(pQueryInfo)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } pQueryInfo->distinctTag = true; } @@ -1681,29 +1687,30 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis return TSDB_CODE_SUCCESS; } -int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, - int8_t type, char* fieldName, SSqlExpr* pSqlExpr) { - - for (int32_t i = 0; i < pIdList->num; ++i) { - int32_t tableId = pIdList->ids[i].tableIndex; - STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[tableId]; - - int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - if (pIdList->ids[i].columnIndex >= numOfCols) { +int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes, + int8_t type, char* fieldName, SExprInfo* pSqlExpr) { + for (int32_t i = 0; i < pColList->num; ++i) { + int32_t tableIndex = pColList->ids[i].tableIndex; + STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[tableIndex]->pTableMeta; + + int32_t numOfCols = tscGetNumOfColumns(pTableMeta); + if (pColList->ids[i].columnIndex >= numOfCols) { continue; } - - tscColumnListInsert(pQueryInfo->colList, &(pIdList->ids[i])); + + uint64_t uid = pTableMeta->id.uid; + SSchema* pSchema = tscGetTableSchema(pTableMeta); + tscColumnListInsert(pQueryInfo->colList, pColList->ids[i].columnIndex, uid, &pSchema[pColList->ids[i].columnIndex]); } TAOS_FIELD f = tscCreateField(type, fieldName, bytes); SInternalField* pInfo = tscFieldInfoInsert(&pQueryInfo->fieldsInfo, outputIndex, &f); - pInfo->pSqlExpr = pSqlExpr; + pInfo->pExpr = pSqlExpr; return TSDB_CODE_SUCCESS; } -SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) { +SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; int32_t numOfCols = tscGetNumOfColumns(pTableMeta); @@ -1715,7 +1722,7 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tabl if (functionId == TSDB_FUNC_TAGPRJ) { index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema); } else { index.columnIndex = colIndex; } @@ -1725,26 +1732,26 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tabl (functionId == TSDB_FUNC_TAGPRJ)); } -SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, +SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) { int16_t colId = getNewResColId(pQueryInfo); - SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, + SExprInfo* 
pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); - tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName)); + tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName)); - SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex); + SColumnList ids = createColumnList(1, pIndex->tableIndex, pIndex->columnIndex); if (TSDB_COL_IS_TAG(flag)) { ids.num = 0; } insertResultField(pQueryInfo, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name, pExpr); - pExpr->colInfo.flag = flag; + pExpr->base.colInfo.flag = flag; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); if (TSDB_COL_IS_TAG(flag)) { - tscColumnListInsert(pTableMetaInfo->tagColList, pIndex); + tscColumnListInsert(pTableMetaInfo->tagColList, pIndex->columnIndex, pTableMetaInfo->pTableMeta->id.uid, pColSchema); } return pExpr; @@ -1766,8 +1773,8 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum } for (int32_t j = 0; j < numOfTotalColumns; ++j) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex); - tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName)); + SExprInfo* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex); + tstrncpy(pExpr->base.aliasName, pSchema[j].name, sizeof(pExpr->base.aliasName)); pIndex->columnIndex = j; SColumnList ids = {0}; @@ -1807,7 +1814,10 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } // add the primary timestamp column even though it is not required by user - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[index.tableIndex]->pTableMeta; + if (pTableMeta->tableType != TSDB_TEMP_TABLE) { + tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid); + } } else if (optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) { // simple column projection query SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -1816,12 +1826,12 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t index.tableIndex = 0; SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->token, pItem->aliasName); - SSqlExpr* pExpr = + SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC); // NOTE: the first parameter is reserved for the tag column id during join query process. 
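The change repeated throughout this hunk is that expression accessors now return SExprInfo* rather than SSqlExpr*, with the old message-level struct embedded as the base member; that is why pExpr->aliasName, pExpr->numOfParams and pExpr->param[...] all become pExpr->base.X at the call sites. A minimal sketch of the assumed layout follows; the field subset and the alias buffer size are illustrative only, not copied from the real headers:

    /* Sketch only: the real SSqlExpr/SExprInfo definitions carry many more
     * fields; this just shows why call sites now dereference ->base. */
    #include <stdint.h>

    typedef struct SSqlExpr {       /* wire-level expression, as before */
        char     aliasName[64];     /* size assumed for illustration */
        int16_t  functionId;
        int16_t  numOfParams;
        int16_t  resType;
        int16_t  resBytes;
        uint64_t uid;               /* uid of the owning table */
    } SSqlExpr;

    typedef struct SExprInfo {      /* client-side wrapper handed out instead */
        SSqlExpr          base;     /* the former SSqlExpr lives here */
        struct tExprNode *pExpr;    /* arithmetic expression tree, if any */
    } SExprInfo;

    /* old call site: tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));           */
    /* new call site: tstrncpy(pExpr->base.aliasName, name, sizeof(pExpr->base.aliasName)); */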
- pExpr->numOfParams = 2; - tVariantAssign(&pExpr->param[1], &pItem->pNode->value); + pExpr->base.numOfParams = 2; + tVariantAssign(&pExpr->base.param[1], &pItem->pNode->value); } else if (optr == TK_ID) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -1848,7 +1858,8 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } // add the primary timestamp column even though it is not required by user - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); } else { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1878,30 +1889,30 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS bytes = pSchema->bytes; } - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false); - tstrncpy(pExpr->aliasName, name, tListLen(pExpr->aliasName)); + SExprInfo* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false); + tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName)); if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) { - pExpr->colInfo.flag |= TSDB_COL_NULL; + pExpr->base.colInfo.flag |= TSDB_COL_NULL; } // set reverse order scan data blocks for last query if (functionID == TSDB_FUNC_LAST) { - pExpr->numOfParams = 1; - pExpr->param[0].i64 = TSDB_ORDER_DESC; - pExpr->param[0].nType = TSDB_DATA_TYPE_INT; + pExpr->base.numOfParams = 1; + pExpr->base.param[0].i64 = TSDB_ORDER_DESC; + pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT; } // for all queries, the timestamp column needs to be loaded - SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscColumnListInsert(pQueryInfo->colList, &index); + SSchema s = {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP,}; + tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, pExpr->base.uid, &s); // if it is not in the final result, do not add it - SColumnList ids = getColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); + SColumnList ids = createColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); if (finalResult) { - insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->aliasName, pExpr); + insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->base.aliasName, pExpr); } else { - tscColumnListInsert(pQueryInfo->colList, &(ids.ids[0])); + tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } return TSDB_CODE_SUCCESS; @@ -1934,14 +1945,14 @@ static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) { if (pQueryInfo->sessionWindow.gap > 0 || tscGroupbyColumn(pQueryInfo)) { size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExpr; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId != TSDB_FUNC_LAST && pExpr->functionId != TSDB_FUNC_LAST_DST) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId != TSDB_FUNC_LAST && pExpr->base.functionId != TSDB_FUNC_LAST_DST) { continue; } - pExpr->numOfParams = 1; - pExpr->param->i64 = TSDB_ORDER_ASC; - pExpr->param->nType = TSDB_DATA_TYPE_INT; + pExpr->base.numOfParams = 1; + pExpr->base.param->i64 = TSDB_ORDER_ASC; + 
pExpr->base.param->nType = TSDB_DATA_TYPE_INT; } } } @@ -1967,7 +1978,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SSqlExpr* pExpr = NULL; + SExprInfo* pExpr = NULL; SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (pItem->pNode->pParam != NULL) { @@ -1977,7 +1988,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if ((pToken->z == NULL || pToken->n == 0) && (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } + } + if (sqlOptr == TK_ALL) { // select table.* // check if the table name is valid or not @@ -2031,22 +2043,24 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName)); - getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1); + memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); + getColumnName(pItem, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1); - SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex); + SColumnList list = createColumnList(1, index.tableIndex, index.columnIndex); if (finalResult) { int32_t numOfOutput = tscNumOfFields(pQueryInfo); - insertResultField(pQueryInfo, numOfOutput, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->aliasName, pExpr); + insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->base.aliasName, pExpr); } else { - for (int32_t i = 0; i < ids.num; ++i) { - tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i])); + for (int32_t i = 0; i < list.num; ++i) { + SSchema* ps = tscGetTableSchema(pTableMetaInfo->pTableMeta); + tscColumnListInsert(pQueryInfo->colList, list.ids[i].columnIndex, pTableMetaInfo->pTableMeta->id.uid, + &ps[list.ids[i].columnIndex]); } } // the time stamp may be always needed if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); } return TSDB_CODE_SUCCESS; @@ -2089,11 +2103,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // 2. 
check if sql function can be applied on this column data type pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - int16_t colType = pSchema->type; - if (!IS_NUMERIC_TYPE(colType)) { + if (!IS_NUMERIC_TYPE(pSchema->type)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } else if (IS_UNSIGNED_NUMERIC_TYPE(colType) && functionId == TSDB_FUNC_DIFF) { + } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && functionId == TSDB_FUNC_DIFF) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -2110,10 +2123,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (functionId == TSDB_FUNC_DIFF) { colIndex += 1; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, + SExprInfo* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); - SColumnList ids = getColumnList(1, 0, 0); + SColumnList ids = createColumnList(1, 0, 0); insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } @@ -2122,7 +2135,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); + SExprInfo* pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); if (functionId == TSDB_FUNC_LEASTSQR) { /* set the leastsquares parameters */ @@ -2131,33 +2144,31 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES); + addExprParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES); memset(val, 0, tListLen(val)); if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) { return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + addExprParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } - SColumnList ids = {0}; - ids.num = 1; - ids.ids[0] = index; - - memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName)); - getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1); - + SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + + memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); + getColumnName(pItem, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1); + if (finalResult) { int32_t numOfOutput = tscNumOfFields(pQueryInfo); - insertResultField(pQueryInfo, numOfOutput, &ids, pExpr->resBytes, (int32_t)pExpr->resType, pExpr->aliasName, pExpr); + insertResultField(pQueryInfo, numOfOutput, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType, + pExpr->base.aliasName, pExpr); } else { - for (int32_t i = 0; i < ids.num; ++i) { - tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i])); - } + assert(ids.num == 1); + tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, 
pExpr->base.uid); return TSDB_CODE_SUCCESS; } case TSDB_FUNC_FIRST: @@ -2294,7 +2305,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); // functions can not be applied to tags if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { @@ -2302,8 +2313,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } // 2. valid the column type - int16_t colType = pSchema[index.columnIndex].type; - if (!IS_NUMERIC_TYPE(colType)) { + if (!IS_NUMERIC_TYPE(pSchema->type)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -2314,12 +2324,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tVariant* pVariant = &pParamElem[1].pNode->value; - int8_t resultType = pSchema[index.columnIndex].type; - int16_t resultSize = pSchema[index.columnIndex].bytes; + int8_t resultType = pSchema->type; + int16_t resultSize = pSchema->bytes; - char val[8] = {0}; - SSqlExpr* pExpr = NULL; - + char val[8] = {0}; + + SExprInfo* pExpr = NULL; if (functionId == TSDB_FUNC_PERCT || functionId == TSDB_FUNC_APERCT) { tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE, true); @@ -2336,11 +2346,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col * for dp = 0, it is actually min, * for dp = 100, it is max, */ - tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); colIndex += 1; // the first column is ts pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + addExprParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); @@ -2354,29 +2364,30 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); - tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->aliasName)); + tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; - SColumnList ids = getColumnList(1, index.tableIndex, TS_COLUMN_INDEX); + SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); + addExprParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } - memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName)); - getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1); - - SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex); + memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); + getColumnName(pItem, 
pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1); + + // todo refactor: tscColumnListInsert part + SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + if (finalResult) { - insertResultField(pQueryInfo, colIndex, &ids, resultSize, resultType, pExpr->aliasName, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, resultSize, resultType, pExpr->base.aliasName, pExpr); } else { - for (int32_t i = 0; i < ids.num; ++i) { - tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i])); - } + assert(ids.num == 1); + tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } return TSDB_CODE_SUCCESS; @@ -2426,9 +2437,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid, + &pSchema[index.columnIndex]); SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - + SSchema s = {0}; if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { s = *tGetTbnameColumnSchema(); @@ -2468,10 +2480,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col s.bytes = bytes; s.type = (uint8_t)resType; - SSqlExpr* pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, &s, TSDB_COL_TAG); - pExpr->numOfParams = 1; - pExpr->param[0].i64 = pTableMetaInfo->pTableMeta->tableInfo.rowSize; - pExpr->param[0].nType = TSDB_DATA_TYPE_BIGINT; + SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, &s, TSDB_COL_TAG); + pExpr->base.numOfParams = 1; + pExpr->base.param[0].i64 = pTableMetaInfo->pTableMeta->tableInfo.rowSize; + pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; return TSDB_CODE_SUCCESS; } @@ -2483,7 +2495,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } // todo refactor -static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) { +static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) { assert(num == 1 && tableIndex >= 0); SColumnList columnList = {0}; @@ -2783,23 +2795,23 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t k = 0; k < size; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); - int16_t functionId = aAggs[pExpr->functionId].stableFuncId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, k); + int16_t functionId = aAggs[pExpr->base.functionId].stableFuncId; - int32_t colIndex = pExpr->colInfo.colIndex; + int32_t colIndex = pExpr->base.colInfo.colIndex; SSchema* pSrcSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, colIndex); if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) || (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { - if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->param[0].i64, &type, &bytes, + if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes, &interBytes, 0, true) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes); + tscSqlExprUpdate(pQueryInfo, k, functionId, 
pExpr->base.colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes); // todo refactor - pExpr->interBytes = interBytes; + pExpr->base.interBytes = interBytes; } } @@ -2816,14 +2828,14 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->base.colInfo.colIndex); // the final result size and type in the same as query on single table. // so here, set the flag to be false; int32_t inter = 0; - int32_t functionId = pExpr->functionId; + int32_t functionId = pExpr->base.functionId; if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) { continue; } @@ -2836,7 +2848,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) { functionId = TSDB_FUNC_STDDEV; } - getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes, + getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->base.resType, &pExpr->base.resBytes, &inter, 0, false); } } @@ -2849,7 +2861,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) // filter sql function not supported by metric query yet. size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->base.functionId; if ((aAggs[functionId].status & TSDB_FUNCSTATE_STABLE) == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return true; @@ -2900,17 +2912,17 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo); assert(numOfExpr > 0); - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, startIdx); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, startIdx); // ts function can be simultaneously used with any other functions. 
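For context, the rule functionCompatibleCheck keeps enforcing here is factor based: every selected function maps to an entry in functionCompatList, ts/tag projections are skipped, and all remaining expressions must share one factor or the query is rejected with "functions or others can not be mixed up". A compact sketch of that idea, using a stand-in sentinel rather than the real table values:

    /* Hypothetical sketch of the "single shared factor" rule; the real check
     * reads functionCompatList and skips TS/TAG/timestamp projections. */
    #include <stdbool.h>
    #include <stddef.h>

    #define COMPAT_ANY 0   /* stands in for expressions exempt from the check */

    static bool sameFactorOnly(const int *factor, size_t num) {
        int ref = COMPAT_ANY;
        for (size_t i = 0; i < num; ++i) {
            if (factor[i] == COMPAT_ANY) continue;                /* exempt expression */
            if (ref == COMPAT_ANY) { ref = factor[i]; continue; } /* first real factor */
            if (factor[i] != ref) return false;                   /* mixed-up functions */
        }
        return true;
    }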
- int32_t functionID = pExpr->functionId; + int32_t functionID = pExpr->base.functionId; if (functionID == TSDB_FUNC_TS || functionID == TSDB_FUNC_TS_DUMMY) { startIdx++; } - int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; + int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->base.functionId]; - if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || twQuery || !groupbyTagsOrNull(pQueryInfo))) { + if (tscSqlExprGet(pQueryInfo, 0)->base.functionId == TSDB_FUNC_LAST_ROW && (joinQuery || twQuery || !groupbyTagsOrNull(pQueryInfo))) { return false; } @@ -2919,14 +2931,14 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = startIdx + 1; i < size; ++i) { - SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr1 = tscSqlExprGet(pQueryInfo, i); - int16_t functionId = pExpr1->functionId; + int16_t functionId = pExpr1->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) { continue; } - if (functionId == TSDB_FUNC_PRJ && (pExpr1->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->colInfo.flag))) { + if (functionId == TSDB_FUNC_PRJ && (pExpr1->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->base.colInfo.flag))) { continue; } @@ -2946,7 +2958,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool return true; } -int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) { +int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) { const char* msg1 = "too many columns in group by clause"; const char* msg2 = "invalid column name in group by clause"; const char* msg3 = "columns from one table allowed as group by columns"; @@ -3032,14 +3044,14 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) taosArrayPush(pGroupExpr->columnInfo, &colIndex); index.columnIndex = relIndex; - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema); } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); } - tscColumnListInsert(pQueryInfo->colList, &index); + tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema); SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; taosArrayPush(pGroupExpr->columnInfo, &colIndex); @@ -3056,33 +3068,29 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) } -static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { - if (pColumn == NULL) { - return NULL; - } - - int32_t size = pColumn->numOfFilters + 1; +static SColumnFilterInfo* addColumnFilterInfo(SColumnFilterList* filterList) { + int32_t size = (filterList->numOfFilters) + 1; - char* tmp = (char*) realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size)); + char* tmp = (char*) realloc((void*)(filterList->filterInfo), sizeof(SColumnFilterInfo) * (size)); if (tmp != NULL) { - pColumn->filterInfo = (SColumnFilterInfo*)tmp; + 
filterList->filterInfo = (SColumnFilterInfo*)tmp; } else { return NULL; } - pColumn->numOfFilters++; + filterList->numOfFilters = size; - SColumnFilterInfo* pColFilterInfo = &pColumn->filterInfo[pColumn->numOfFilters - 1]; + SColumnFilterInfo* pColFilterInfo = &(filterList->filterInfo[size - 1]); memset(pColFilterInfo, 0, sizeof(SColumnFilterInfo)); return pColFilterInfo; } -static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter, +static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t timePrecision, SColumnFilterInfo* pColumnFilter, int16_t colType, tSqlExpr* pExpr) { const char* msg = "not supported filter condition"; - tSqlExpr* pRight = pExpr->pRight; + tSqlExpr *pRight = pExpr->pRight; if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) { colType = TSDB_DATA_TYPE_BIGINT; @@ -3093,6 +3101,10 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, if (TSDB_CODE_SUCCESS != retVal) { return retVal; } + } else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BIGINT == pRight->value.nType)) { + if ((timePrecision == TSDB_TIME_PRECISION_MILLI) && (pRight->flags & (1 << EXPR_FLAG_US_TIMESTAMP))) { + pRight->value.i64 /= 1000; + } } int32_t retVal = TSDB_CODE_SUCCESS; @@ -3234,7 +3246,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC const char* msg2 = "binary column not support this operator"; const char* msg3 = "bool column not support this operator"; - SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex); + SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex->columnIndex, pTableMeta->id.uid, pSchema); SColumnFilterInfo* pColFilter = NULL; /* @@ -3243,10 +3255,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC */ if (sqlOptr == TK_AND) { // this is a new filter condition on this column - if (pColumn->numOfFilters == 0) { - pColFilter = addColumnFilterInfo(pColumn); + if (pColumn->info.flist.numOfFilters == 0) { + pColFilter = addColumnFilterInfo(&pColumn->info.flist); } else { // update the existed column filter information, find the filter info here - pColFilter = &pColumn->filterInfo[0]; + pColFilter = &pColumn->info.flist.filterInfo[0]; } if (pColFilter == NULL) { @@ -3254,7 +3266,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } } else if (sqlOptr == TK_OR) { // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" - pColFilter = addColumnFilterInfo(pColumn); + pColFilter = addColumnFilterInfo(&pColumn->info.flist); if (pColFilter == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -3287,11 +3299,11 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } } - pColumn->colIndex = *pIndex; + pColumn->columnIndex = pIndex->columnIndex; + pColumn->tableUid = pTableMeta->id.uid; - int16_t colType = pSchema->type; - - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr); + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + return doExtractColumnFilterInfo(pCmd, pQueryInfo, tinfo.precision, pColFilter, pSchema->type, pExpr); } static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) { @@ -3353,9 +3365,9 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* 
pExpr) { int32_t code = 0; const char* msg1 = "timestamp required for join tables"; + const char* msg2 = "only support one join tag for each table"; const char* msg3 = "type of join columns must be identical"; const char* msg4 = "invalid column name in join condition"; - const char* msg5 = "only support one join tag for each table"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; @@ -3366,7 +3378,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (code) { return code; } - + return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight); } @@ -3384,23 +3396,25 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (*leftNode == NULL) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + (*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid; (*leftNode)->tagColId = pTagSchema1->colId; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) { - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid)) { + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1); + if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } } int16_t leftIdx = index.tableIndex; - index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); @@ -3420,11 +3434,13 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS (*rightNode)->tagColId = pTagSchema2->colId; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) { - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta); + if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid)) { + + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2); if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } } @@ -3438,16 +3454,16 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if ((*leftNode)->tagJoin == NULL) { (*leftNode)->tagJoin = taosArrayInit(2, sizeof(int16_t)); } - + if ((*rightNode)->tagJoin == NULL) { (*rightNode)->tagJoin = taosArrayInit(2, sizeof(int16_t)); - } - + } + taosArrayPush((*leftNode)->tagJoin, &rightIdx); taosArrayPush((*rightNode)->tagJoin, &leftIdx); - + pQueryInfo->tagCond.joinInfo.hasJoin = true; - + return TSDB_CODE_SUCCESS; } @@ -3518,15 +3534,15 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer // Not supported data type in arithmetic expression uint64_t id = -1; for(int32_t i = 0; i < inc; ++i) { - SSqlExpr* p1 = tscSqlExprGet(pQueryInfo, i + outputIndex); - int16_t t = p1->resType; + 
SExprInfo* p1 = tscSqlExprGet(pQueryInfo, i + outputIndex); + int16_t t = p1->base.resType; if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) { return TSDB_CODE_TSC_INVALID_SQL; } if (i == 0) { - id = p1->uid; - } else if (id != p1->uid) { + id = p1->base.uid; + } else if (id != p1->base.uid) { return TSDB_CODE_TSC_INVALID_SQL; } } @@ -3807,7 +3823,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return TSDB_CODE_TSC_OUT_OF_MEMORY; } } - + int16_t leftIdx = index.tableIndex; if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -3825,20 +3841,20 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return TSDB_CODE_TSC_OUT_OF_MEMORY; } } - + int16_t rightIdx = index.tableIndex; if ((*leftNode)->tsJoin == NULL) { (*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t)); } - + if ((*rightNode)->tsJoin == NULL) { (*rightNode)->tsJoin = taosArrayInit(2, sizeof(int16_t)); - } - + } + taosArrayPush((*leftNode)->tsJoin, &rightIdx); taosArrayPush((*rightNode)->tsJoin, &leftIdx); - + /* * to release expression, e.g., m1.ts = m2.ts, * since this expression is used to set the join query type @@ -3916,6 +3932,10 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr const char* msg1 = "query condition between different columns must use 'AND'"; + if ((*pExpr)->flags & (1 << EXPR_FLAG_TS_ERROR)) { + return TSDB_CODE_TSC_INVALID_SQL; + } + tSqlExpr* pLeft = (*pExpr)->pLeft; tSqlExpr* pRight = (*pExpr)->pRight; @@ -3953,6 +3973,14 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr exchangeExpr(*pExpr); + if (pLeft->tokenId == TK_ID && pRight->tokenId == TK_TIMESTAMP && (pRight->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR))) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + if ((pLeft->flags & (1 << EXPR_FLAG_TS_ERROR)) || (pRight->flags & (1 << EXPR_FLAG_TS_ERROR))) { + return TSDB_CODE_TSC_INVALID_SQL; + } + return handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, parentOptr); } @@ -4086,8 +4114,8 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) { for (int32_t i = 0; i < num; ++i) { SColumn* pCol = taosArrayGetP(pColList, i); - for (int32_t j = 0; j < pCol->numOfFilters; ++j) { - SColumnFilterInfo* pColFilter = &pCol->filterInfo[j]; + for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) { + SColumnFilterInfo* pColFilter = &pCol->info.flist.filterInfo[j]; int32_t lowerOptr = pColFilter->lowerRelOptr; int32_t upperOptr = pColFilter->upperRelOptr; @@ -4110,7 +4138,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE const char* msg0 = "invalid timestamp"; const char* msg1 = "only one time stamp window allowed"; int32_t code = 0; - + if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -4124,7 +4152,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE if (code) { return code; } - + return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight); } else { SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -4214,18 +4242,22 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { tscError("%p: invalid column name (left)", pQueryInfo); } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - index.columnIndex = index.columnIndex - 
tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + + SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); + tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]); if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { tscError("%p: invalid column name (right)", pQueryInfo); } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + + pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); + tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]); } } */ @@ -4336,7 +4368,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE // TODO: more error handling } END_TRY - // add to source column list + // add to required table column list STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); int64_t uid = pTableMetaInfo->pTableMeta->id.uid; int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -4345,7 +4377,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE for(int32_t j = 0; j < num; ++j) { SColIndex* pIndex = taosArrayGet(colList, j); SColumnIndex index = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols}; - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + + SSchema* s = tscGetTableSchema(pTableMetaInfo->pTableMeta); + tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid, + &s[pIndex->colIndex]); } tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw); @@ -4375,10 +4410,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { const char* msg1 = "timestamp required for join tables"; const char* msg2 = "tag required for join stables"; - + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { - SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; - + SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; + if (node == NULL || node->tsJoin == NULL || taosArrayGetSize(node->tsJoin) <= 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1); } @@ -4387,8 +4422,8 @@ int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { - SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; - + SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; + if (node == NULL || node->tagJoin == NULL || taosArrayGetSize(node->tagJoin) <= 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); } @@ -4400,12 +4435,12 @@ int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t type) { - SJoinNode *node = nodes[*tidx]; + SJoinNode *node = nodes[*tidx]; SArray* arr = (type == 0) ? 
node->tsJoin : node->tagJoin; size_t size = taosArrayGetSize(arr); p[*tidx] = 1; - + for (int32_t j = 0; j < size; j++) { int16_t* idx = taosArrayGet(arr, j); r[*idx] = 1; @@ -4418,21 +4453,21 @@ void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { const char* msg1 = "not all join tables have same timestamp"; const char* msg2 = "not all join tables have same tag"; - + int8_t r[TSDB_MAX_JOIN_TABLE_NUM] = {0}; int8_t p[TSDB_MAX_JOIN_TABLE_NUM] = {0}; - + for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 0); - + taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin); - + for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) { if (r[j]) { taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin, &j); } } - + memset(r, 0, sizeof(r)); memset(p, 0, sizeof(p)); } @@ -4440,20 +4475,20 @@ int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tsJoin) != pQueryInfo->numOfTables) { return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1); } - + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 1); - + taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin); - + for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) { if (r[j]) { taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin, &j); } } - + memset(r, 0, sizeof(r)); memset(p, 0, sizeof(p)); } @@ -4468,7 +4503,7 @@ int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { } -int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) { +int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -4550,7 +4585,7 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql ret = mergeJoinNodes(pQueryInfo, pSql); if (ret) { goto PARSE_WHERE_EXIT; - } + } } PARSE_WHERE_EXIT: @@ -4679,16 +4714,34 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } -int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySQL) { - SArray* pFillToken = pQuerySQL->fillType; +int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) { + SArray* pFillToken = pSqlNode->fillType; + if (pSqlNode->fillType == NULL) { + return TSDB_CODE_SUCCESS; + } + tVariantListItem* pItem = taosArrayGet(pFillToken, 0); const int32_t START_INTERPO_COL_IDX = 1; - const char* msg = "illegal value or data overflow"; const char* msg1 = "value is expected"; const char* msg2 = "invalid fill option"; const char* msg3 = "top/bottom not support fill"; + const char* msg4 = "illegal value or data overflow"; + const char* msg5 = "fill only available for interval query"; + + if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + /* + * fill options are set at the end position, when all columns are set properly + * the columns may be increased due to group by operation + */ + if (checkQueryRangeForFill(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + if (pItem->pVar.nType != 
TSDB_DATA_TYPE_BINARY) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); @@ -4752,7 +4805,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ tVariant* p = taosArrayGet(pFillToken, j); int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true); if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } } @@ -4775,8 +4828,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for(int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } @@ -4801,7 +4854,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { } } -int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode, SSchema* pSchema) { +int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; const char* msg2 = "order by primary timestamp or first tag in groupby clause allowed"; @@ -4816,11 +4869,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQueryInfo->order.orderColId = 0; return TSDB_CODE_SUCCESS; } - if (pQuerySqlNode->pSortOrder == NULL) { + if (pSqlNode->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; } - SArray* pSortorder = pQuerySqlNode->pSortOrder; + SArray* pSortorder = pSqlNode->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -4888,24 +4941,24 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderType = p1->sortOrder; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); - assert(pExpr->functionId == TSDB_FUNC_TS); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, 0); + assert(pExpr->base.functionId == TSDB_FUNC_TS); pExpr = tscSqlExprGet(pQueryInfo, 1); - if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -4918,7 +4971,7 @@ int32_t 
parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* } if (s == 2) { - tVariantListItem *pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; @@ -4927,7 +4980,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } - pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 1); + pItem = taosArrayGet(pSqlNode->pSortOrder, 1); tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -4954,21 +5007,21 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); - assert(pExpr->functionId == TSDB_FUNC_TS); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, 0); + assert(pExpr->base.functionId == TSDB_FUNC_TS); pExpr = tscSqlExprGet(pQueryInfo, 1); - if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } - tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0); + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; } @@ -5005,7 +5058,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlCmd* pCmd = &pSql->cmd; SAlterTableInfo* pAlterSQL = pInfo->pAlterInfo; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX); @@ -5169,7 +5222,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { - tscError("%p failed to malloc for alter table msg", pSql); + tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -5269,7 +5322,7 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { - int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; + int32_t functId = tscSqlExprGet(pQueryInfo, i)->base.functionId; if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -5286,14 +5339,14 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t k = 0; k < size; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, k); // 
projection query on primary timestamp, the selectivity function needs to be present. - if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool hasSelectivity = false; for (int32_t j = 0; j < size; ++j) { - SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j); - if ((aAggs[pEx->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { + SExprInfo* pEx = tscSqlExprGet(pQueryInfo, j); + if ((aAggs[pEx->base.functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { hasSelectivity = true; break; } @@ -5304,8 +5357,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu } } - if ((pExpr->functionId == TSDB_FUNC_PRJ && pExpr->numOfParams == 0) || pExpr->functionId == TSDB_FUNC_DIFF || - pExpr->functionId == TSDB_FUNC_ARITHM) { + if ((pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || pExpr->base.functionId == TSDB_FUNC_DIFF || + pExpr->base.functionId == TSDB_FUNC_ARITHM) { isProjectionFunction = true; } } @@ -5493,10 +5546,14 @@ bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) { return true; } - return (pQueryInfo->window.skey == pQueryInfo->window.ekey) && (pQueryInfo->window.skey != 0); + if (pQueryInfo->window.skey == INT64_MIN || pQueryInfo->window.ekey == INT64_MAX) { + return false; + } + + return !(pQueryInfo->window.skey != pQueryInfo->window.ekey && pQueryInfo->interval.interval == 0); } -int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySqlNode* pQuerySqlNode, SSqlObj* pSql) { +int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIndex, SSqlNode* pSqlNode, SSqlObj* pSql) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); const char* msg0 = "soffset/offset can not be less than 0"; @@ -5504,19 +5561,19 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn const char* msg2 = "slimit/soffset can not apply to projection query"; // handle the limit offset value, validate the limit - pQueryInfo->limit = pQuerySqlNode->limit; + pQueryInfo->limit = pSqlNode->limit; pQueryInfo->clauseLimit = pQueryInfo->limit.limit; - pQueryInfo->slimit = pQuerySqlNode->slimit; + pQueryInfo->slimit = pSqlNode->slimit; - tscDebug("%p limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql, pQueryInfo->limit.limit, - pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset); + tscDebug("0x%"PRIx64" limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql->self, + pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset); if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); } if (pQueryInfo->limit.limit == 0) { - tscDebug("%p limit 0, no output result", pSql); + tscDebug("0x%"PRIx64" limit 0, no output result", pSql->self); pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; return TSDB_CODE_SUCCESS; } @@ -5538,7 +5595,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn } if (pQueryInfo->slimit.limit == 0) { - tscDebug("%p slimit 0, no output result", pSql); + tscDebug("0x%"PRIx64" slimit 0, no output result", pSql->self); pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; return TSDB_CODE_SUCCESS; } @@ -5556,7 
+5613,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn // No tables included. No results generated. Query results are empty. if (pTableMetaInfo->vgroupList->numOfVgroups == 0) { - tscDebug("%p no table in super table, no output result", pSql); + tscDebug("0x%"PRIx64" no table in super table, no output result", pSql->self); pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; return TSDB_CODE_SUCCESS; } @@ -5691,32 +5748,33 @@ int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql) { } void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex) { - SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentObj->cmd, subClauseIndex); + SQueryInfo* pParentQueryInfo = tscGetQueryInfo(&pParentObj->cmd, subClauseIndex); if (pParentQueryInfo->groupbyExpr.numOfGroupCols > 0) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex); - SSqlExpr* pExpr = NULL; + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, subClauseIndex); + SExprInfo* pExpr = NULL; size_t size = taosArrayGetSize(pQueryInfo->exprList); if (size > 0) { pExpr = tscSqlExprGet(pQueryInfo, (int32_t)size - 1); } - if (pExpr == NULL || pExpr->functionId != TSDB_FUNC_TAG) { + if (pExpr == NULL || pExpr->base.functionId != TSDB_FUNC_TAG) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pParentQueryInfo, tableIndex); - int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); + uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; + int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, uid); SSchema* pTagSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, colId); int16_t colIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, colId); - SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex}; + SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex}; char* name = pTagSchema->name; int16_t type = pTagSchema->type; int16_t bytes = pTagSchema->bytes; pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); - pExpr->colInfo.flag = TSDB_COL_TAG; + pExpr->base.colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list SColumnList ids = {0}; @@ -5724,21 +5782,20 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau int32_t relIndex = index.columnIndex; - pExpr->colInfo.colIndex = relIndex; + pExpr->base.colInfo.colIndex = relIndex; SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); pColIndex->colIndex = relIndex; - index = (SColumnIndex) {.tableIndex = tableIndex, .columnIndex = relIndex}; - tscColumnListInsert(pTableMetaInfo->tagColList, &index); + tscColumnListInsert(pTableMetaInfo->tagColList, relIndex, uid, pTagSchema); } } } // limit the output to be 1 for each state value -static void doLimitOutputNormalColOfGroupby(SSqlExpr* pExpr) { +static void doLimitOutputNormalColOfGroupby(SExprInfo* pExpr) { int32_t outputRow = 1; - tVariantCreateFromBinary(&pExpr->param[0], (char*)&outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT); - pExpr->numOfParams = 1; + tVariantCreateFromBinary(&pExpr->base.param[0], (char*)&outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT); + pExpr->base.numOfParams = 1; } void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { @@ -5755,7 +5812,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { int32_t numOfFields = 
tscNumOfFields(pQueryInfo); SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfFields - 1); - doLimitOutputNormalColOfGroupby(pInfo->pSqlExpr); + doLimitOutputNormalColOfGroupby(pInfo->pExpr); pInfo->visible = false; } @@ -5768,25 +5825,25 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) { bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_TAGPRJ || pExpr->functionId == TSDB_FUNC_TAG) { - pExpr->functionId = TSDB_FUNC_TAG_DUMMY; - tagLength += pExpr->resBytes; - } else if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - pExpr->functionId = TSDB_FUNC_TS_DUMMY; - tagLength += pExpr->resBytes; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ || pExpr->base.functionId == TSDB_FUNC_TAG) { + pExpr->base.functionId = TSDB_FUNC_TAG_DUMMY; + tagLength += pExpr->base.resBytes; + } else if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + pExpr->base.functionId = TSDB_FUNC_TS_DUMMY; + tagLength += pExpr->base.resBytes; } } SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if ((pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) && - !(pExpr->functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->colInfo.flag))) { - SSchema* pColSchema = &pSchema[pExpr->colInfo.colIndex]; - getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, (int32_t)pExpr->param[0].i64, &pExpr->resType, - &pExpr->resBytes, &pExpr->interBytes, tagLength, isSTable); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY) && + !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) { + SSchema* pColSchema = &pSchema[pExpr->base.colInfo.colIndex]; + getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->base.functionId, (int32_t)pExpr->base.param[0].i64, &pExpr->base.resType, + &pExpr->base.resBytes, &pExpr->base.interBytes, tagLength, isSTable); } } } @@ -5795,17 +5852,17 @@ static int32_t doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_PRJ && (!TSDB_COL_IS_UD_COL(pExpr->colInfo.flag) && (pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX))) { + if (pExpr->base.functionId == TSDB_FUNC_PRJ && (!TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag) && (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX))) { bool qualifiedCol = false; for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); - if (pExpr->colInfo.colId == pColIndex->colId) { + if (pExpr->base.colInfo.colId == pColIndex->colId) { qualifiedCol = true; doLimitOutputNormalColOfGroupby(pExpr); - pExpr->numOfParams = 1; + pExpr->base.numOfParams = 1; break; } } @@ -5838,10 +5895,10 @@ static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; 
++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_PRJ) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ) { hasColumnPrj = true; - } else if (pExpr->functionId == TSDB_FUNC_TAGPRJ) { + } else if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ) { hasTagPrj = true; } } @@ -5855,12 +5912,12 @@ static bool allTagPrjInGroupby(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId != TSDB_FUNC_TAGPRJ) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId != TSDB_FUNC_TAGPRJ) { continue; } - if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->colInfo.colId)) { + if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->base.colInfo.colId)) { allInGroupby = false; break; } @@ -5874,9 +5931,9 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_TAGPRJ) { - pExpr->functionId = TSDB_FUNC_TAG; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ) { + pExpr->base.functionId = TSDB_FUNC_TAG; } } } @@ -5897,18 +5954,18 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i); - if (pExpr->functionId == TSDB_FUNC_TAGPRJ || - (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { + SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i); + if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ || + (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { tagTsColExists = true; // selectivity + ts/tag column break; } } for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i); + SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i); - int16_t functionId = pExpr->functionId; + int16_t functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_ARITHM) { continue; @@ -5944,14 +6001,14 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) * Otherwise, return with error code. 
*/ for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - int16_t functionId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + int16_t functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == 0) { continue; } if ((functionId == TSDB_FUNC_LAST_ROW) || - (functionId == TSDB_FUNC_LAST_DST && (pExpr->colInfo.flag & TSDB_COL_NULL) != 0)) { + (functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) { // do nothing } else { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -5985,34 +6042,30 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) } static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { - const char* msg2 = "interval not allowed in group by normal column"; + const char* msg1 = "interval not allowed in group by normal column"; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SSchema s = *tGetTbnameColumnSchema(); SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); - int16_t bytes = 0; - int16_t type = 0; - char* name = NULL; + + SSchema* tagSchema = NULL; + if (!UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); + } + + SSchema* s = NULL; for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i); int16_t colIndex = pColIndex->colIndex; + if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - type = s.type; - bytes = s.bytes; - name = s.name; + s = tGetTbnameColumnSchema(); } else { if (TSDB_COL_IS_TAG(pColIndex->flag)) { - SSchema* tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - - type = tagSchema[colIndex].type; - bytes = tagSchema[colIndex].bytes; - name = tagSchema[colIndex].name; + s = &tagSchema[colIndex]; } else { - type = pSchema[colIndex].type; - bytes = pSchema[colIndex].bytes; - name = pSchema[colIndex].name; + s = &pSchema[colIndex]; } } @@ -6020,34 +6073,33 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); - - memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); - tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); - - pExpr->colInfo.flag = TSDB_COL_TAG; + SExprInfo* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, s->type, s->bytes, + getNewResColId(pQueryInfo), s->bytes, true); + + memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName)); + tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName)); + + pExpr->base.colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list - SColumnList ids = getColumnList(1, 0, pColIndex->colIndex); - insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr); + SColumnList ids = createColumnList(1, 0, pColIndex->colIndex); + insertResultField(pQueryInfo, (int32_t)size, &ids, s->bytes, (int8_t)s->type, s->name, pExpr); } else { // if this query is "group by" normal column, time window query is not allowed if (isTimeWindowQuery(pQueryInfo)) { - return 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } bool hasGroupColumn = false; for (int32_t j = 0; j < size; ++j) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, j); - if (pExpr->colInfo.colId == pColIndex->colId) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, j); + if ((pExpr->base.functionId == TSDB_FUNC_PRJ) && pExpr->base.colInfo.colId == pColIndex->colId) { + hasGroupColumn = true; break; } } - /* - * if the group by column does not required by user, add this column into the final result set - * but invisible to user - */ + //if the group by column does not required by user, add an invisible column into the final result set. if (!hasGroupColumn) { doAddGroupColumnForSubquery(pQueryInfo, i); } @@ -6064,8 +6116,8 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) { int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - int32_t functionId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + int32_t functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ) { tagProjection = true; @@ -6073,7 +6125,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) { } if (functionId == TSDB_FUNC_COUNT) { - assert(pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX); + assert(pExpr->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX); tableCounting = true; } } @@ -6104,6 +6156,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { // check if all the tags prj columns belongs to the group by columns if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) { + // It is a groupby aggregate query, the tag project function is not suitable for this case. updateTagPrjFunction(pQueryInfo); return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo); } @@ -6111,18 +6164,18 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { // check all query functions in selection clause, multi-output functions are not allowed size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - int32_t functId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + int32_t functId = pExpr->base.functionId; /* * group by normal columns. 
* Check if the column projection is identical to the group by column or not */ - if (functId == TSDB_FUNC_PRJ && pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (functId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool qualified = false; for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); - if (pColIndex->colId == pExpr->colInfo.colId) { + if (pColIndex->colId == pExpr->base.colInfo.colId) { qualified = true; break; } @@ -6138,7 +6191,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } @@ -6147,10 +6200,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_TSC_INVALID_SQL; } - /* - * group by tag function must be not changed the function name, otherwise, the group operation may fail to - * divide the subset of final result. - */ if (doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -6165,12 +6214,12 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return checkUpdateTagPrjFunctions(pQueryInfo, pCmd); } } -int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode) { +int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) { const char* msg1 = "only one expression allowed"; const char* msg2 = "invalid expression in select clause"; const char* msg3 = "invalid function"; - SArray* pExprList = pQuerySqlNode->pSelectList; + SArray* pExprList = pSqlNode->pSelNodeList; size_t size = taosArrayGetSize(pExprList); if (size != 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -6222,12 +6271,12 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode } SColumnIndex ind = {0}; - SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT, + SExprInfo* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pQueryInfo), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false); tSqlExprItem* item = taosArrayGet(pExprList, 0); const char* name = (item->aliasName != NULL)? 
item->aliasName:functionsInfo[index].name; - tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName)); + tstrncpy(pExpr1->base.aliasName, name, tListLen(pExpr1->base.aliasName)); return TSDB_CODE_SUCCESS; } @@ -6316,8 +6365,8 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) { } // for debug purpose -void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex); +void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) { + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, subClauseIndex); int32_t size = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); if (size == 0) { @@ -6326,17 +6375,18 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { int32_t totalBufSize = 1024; - char str[1024] = {0}; + char str[1024+1] = {0}; int32_t offset = 0; offset += sprintf(str, "num:%d [", size); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); char tmpBuf[1024] = {0}; int32_t tmpLen = 0; tmpLen = - sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].name, pExpr->uid, pExpr->colInfo.colId); + sprintf(tmpBuf, "%s(uid:%" PRIu64 ", %d)", aAggs[pExpr->base.functionId].name, pExpr->base.uid, + pExpr->base.colInfo.colId); if (tmpLen + offset >= totalBufSize - 1) break; @@ -6351,14 +6401,14 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { assert(offset < totalBufSize); str[offset] = ']'; assert(offset < totalBufSize); - tscDebug("%p select clause:%s", pSql, str); + tscDebug("0x%"PRIx64" select clause:%s", pSql->self, str); } int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { const char* msg1 = "invalid table name"; SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, subClauseIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo; @@ -6417,7 +6467,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { SSqlCmd* pCmd = &pSql->cmd; SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); // two table: the first one is for current table, and the secondary is for the super table. 
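  /*
   * Context for the check below, stated as an illustrative note: this path handles statements of
   * the form CREATE TABLE ... USING <super_table> TAGS (...), so pQueryInfo is expected to carry
   * the meta info of the table being created plus a second entry for the super table whose tag
   * schema the supplied tag values are validated against.
   */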
if (pQueryInfo->numOfTables < 2) { @@ -6455,7 +6505,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { size_t valSize = taosArrayGetSize(pValList); - // too long tag values will return invalid sql, not be truncated automatically SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta); STagData *pTag = &pCreateTableInfo->tagdata; @@ -6465,7 +6514,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SArray* pNameList = NULL; size_t nameSize = 0; int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta); @@ -6622,7 +6670,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { const char* msg7 = "time interval is required"; SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); assert(pQueryInfo->numOfTables == 1); SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo; @@ -6630,18 +6678,18 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { // if sql specifies db, use it, otherwise use default db SStrToken* pName = &(pCreateTable->name); - SQuerySqlNode* pQuerySqlNode = pCreateTable->pSelect; + SSqlNode* pSqlNode = pCreateTable->pSelect; if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - SFromInfo* pFromInfo = pInfo->pCreateTableInfo->pSelect->from; - if (pFromInfo == NULL || taosArrayGetSize(pFromInfo->tableList) == 0) { + SRelationInfo* pFromInfo = pInfo->pCreateTableInfo->pSelect->from; + if (pFromInfo == NULL || taosArrayGetSize(pFromInfo->list) == 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - STableNamePair* p1 = taosArrayGet(pFromInfo->tableList, 0); + STableNamePair* p1 = taosArrayGet(pFromInfo->list, 0); SStrToken srcToken = {.z = p1->name.z, .n = p1->name.n, .type = TK_STRING}; if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -6658,18 +6706,18 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { } bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); - if (parseSelectClause(&pSql->cmd, 0, pQuerySqlNode->pSelectList, isSTable, false, false) != TSDB_CODE_SUCCESS) { + if (validateSelectNodeList(&pSql->cmd, pQueryInfo, pSqlNode->pSelNodeList, isSTable, false, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - if (pQuerySqlNode->pWhere != NULL) { // query condition in stream computing - if (parseWhereClause(pQueryInfo, &pQuerySqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + if (pSqlNode->pWhere != NULL) { // query condition in stream computing + if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } } // set interval value - if (parseIntervalClause(pSql, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) { + if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -6687,7 +6735,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return code; } - if (pQuerySqlNode->sqlstr.n > TSDB_MAX_SAVED_SQL_LEN) { + if (pSqlNode->sqlstr.n > TSDB_MAX_SAVED_SQL_LEN) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } @@ -6705,12 +6753,12 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { * check if fill operation is available, the fill operation is parsed and executed during query execution, * not here. 
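   * (illustrative note) concretely, the checks right below require the stream to define a time
   * interval before FILL can be used, and reject any fill keyword other than "none" or "null";
   * the actual fill work is left to the later query-execution stage mentioned above.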
*/ - if (pQuerySqlNode->fillType != NULL) { + if (pSqlNode->fillType != NULL) { if (pQueryInfo->interval.interval == 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->fillType, 0); + tVariantListItem* pItem = taosArrayGet(pSqlNode->fillType, 0); if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { @@ -6724,7 +6772,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return TSDB_CODE_SUCCESS; } -static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { +int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { const char* msg3 = "start(end) time of query range required or time range too large"; if (pQueryInfo->interval.interval == 0) { @@ -6759,143 +6807,106 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } +// TODO normalize the function expression and compare it +int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pSqlExpr, + SExprInfo** pExpr) { + *pExpr = NULL; - int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) { - tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false}; - - int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - - // ADD TRUE FOR TEST - if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - - ++pQueryInfo->havingFieldNum; - - size_t n = tscSqlExprNumOfExprs(pQueryInfo); - SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1); - - int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); - pInfo->visible = false; - - if (pInfo->pFieldFilters == NULL) { - SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter)); - if (pFieldFilters == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + size_t num = taosArrayGetSize(pSelectNodeList); + for(int32_t i = 0; i < num; ++i) { + tSqlExprItem* pItem = taosArrayGet(pSelectNodeList, i); + if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it, - SColumn* pFilters = calloc(1, sizeof(SColumn)); - if (pFilters == NULL) { - tfree(pFieldFilters); - - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - pFieldFilters->pFilters = pFilters; - pFieldFilters->pSqlExpr = pSqlExpr; - pSqlExpr->pFilter = pFilters; - pInfo->pFieldFilters = pFieldFilters; - } - - pInfo->pFieldFilters->pExpr = pExpr; + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + int32_t functionId = pSqlExpr->functionId; + if (pSqlExpr->pParam == NULL) { + index.columnIndex = 0; + index.tableIndex = 0; + } else { + tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->pParam, 0); + SStrToken* pToken = &pParamElem->pNode->colInfo; + getColumnIndexByName(pCmd, pToken, pQueryInfo, &index); + } - *interField = pInfo; + size_t numOfNodeInSel = tscSqlExprNumOfExprs(pQueryInfo); + for(int32_t k = 0; k < numOfNodeInSel; ++k) { + SExprInfo* pExpr1 = tscSqlExprGet(pQueryInfo, k); - return TSDB_CODE_SUCCESS; -} + if (pExpr1->base.functionId != functionId) { + continue; + } -int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) { - SInternalField* pInfo = NULL; + if (pExpr1->base.colInfo.colIndex != 
index.columnIndex) { + continue; + } - for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { - pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i); + ++pQueryInfo->havingFieldNum; + *pExpr = pExpr1; + break; + } - if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) { - *pField = pInfo; + assert(*pExpr != NULL); return TSDB_CODE_SUCCESS; } } - int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo); - if (ret) { - return ret; - } - - *pField = pInfo; + tSqlExprItem item = {.pNode = pSqlExpr, .aliasName = NULL, .distinct = false}; - return TSDB_CODE_SUCCESS; -} + int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); -static int32_t genExprFilter(SExprFilter * exprFilter) { - exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t)); - if (exprFilter->fp == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + // ADD TRUE FOR TEST + if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; } - for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) { - SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i]; + ++pQueryInfo->havingFieldNum; - int32_t lower = filterInfo->lowerRelOptr; - int32_t upper = filterInfo->upperRelOptr; - if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { - tscError("invalid rel optr"); - return TSDB_CODE_TSC_APP_ERROR; - } + size_t n = tscSqlExprNumOfExprs(pQueryInfo); + *pExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1); - __filter_func_t ffp = getFilterOperator(lower, upper); - if (ffp == NULL) { - tscError("invalid filter info"); - return TSDB_CODE_TSC_APP_ERROR; - } - - taosArrayPush(exprFilter->fp, &ffp); - } + SInternalField* pField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, n - 1); + pField->visible = false; return TSDB_CODE_SUCCESS; } -static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) { +static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t sqlOptr) { const char* msg1 = "non binary column not support like operator"; - const char* msg2 = "invalid operator for binary column in having clause"; + const char* msg2 = "invalid operator for binary column in having clause"; const char* msg3 = "invalid operator for bool column in having clause"; - SColumn* pColumn = NULL; SColumnFilterInfo* pColFilter = NULL; - SInternalField* pInfo = NULL; - + // TODO refactor: validate the expression /* * in case of TK_AND filter condition, we first find the corresponding column and build the query condition together * the already existed condition. 
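   * (illustrative note) in practice that means an AND condition reuses, or creates, a single
   * filter on the matching function expression, while the OR branch below always appends a fresh
   * filter entry for the new condition.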
*/ + SExprInfo *expr = NULL; if (sqlOptr == TK_AND) { - int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, &expr); if (ret) { return ret; } - pColumn = pInfo->pFieldFilters->pFilters; - // this is a new filter condition on this column - if (pColumn->numOfFilters == 0) { - pColFilter = addColumnFilterInfo(pColumn); + if (expr->base.flist.numOfFilters == 0) { + pColFilter = addColumnFilterInfo(&expr->base.flist); } else { // update the existed column filter information, find the filter info here - pColFilter = &pColumn->filterInfo[0]; + pColFilter = &expr->base.flist.filterInfo[0]; } if (pColFilter == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } } else if (sqlOptr == TK_OR) { - int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, &expr); if (ret) { return ret; } - pColumn = pInfo->pFieldFilters->pFilters; - // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" - pColFilter = addColumnFilterInfo(pColumn); + // TODO refactor + pColFilter = addColumnFilterInfo(&expr->base.flist); if (pColFilter == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -6904,38 +6915,42 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } pColFilter->filterstr = - ((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0); + ((expr->base.resType == TSDB_DATA_TYPE_BINARY || expr->base.resType == TSDB_DATA_TYPE_NCHAR) ? 1 : 0); if (pColFilter->filterstr) { if (pExpr->tokenId != TK_EQ - && pExpr->tokenId != TK_NE - && pExpr->tokenId != TK_ISNULL - && pExpr->tokenId != TK_NOTNULL - && pExpr->tokenId != TK_LIKE - ) { + && pExpr->tokenId != TK_NE + && pExpr->tokenId != TK_ISNULL + && pExpr->tokenId != TK_NOTNULL + && pExpr->tokenId != TK_LIKE + ) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { if (pExpr->tokenId == TK_LIKE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - - if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) { + + if (expr->base.resType == TSDB_DATA_TYPE_BOOL) { if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } } - int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pTableMeta->tableInfo.precision, pColFilter, + expr->base.resType, pExpr); if (ret) { - return ret; + return ret; } - - return genExprFilter(pInfo->pFieldFilters); + + return TSDB_CODE_SUCCESS; } -int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) { +int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t parentOptr) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -6946,12 +6961,12 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, in tSqlExpr* pRight = pExpr->pRight; if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) { - int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId); + int32_t ret = getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, pExpr->tokenId); if (ret != TSDB_CODE_SUCCESS) { return ret; } - return getHavingExpr(pCmd, 
pQueryInfo, pExpr->pRight, pExpr->tokenId); + return getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr->pRight, pExpr->tokenId); } if (pLeft == NULL || pRight == NULL) { @@ -6964,14 +6979,12 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, in exchangeExpr(pExpr); - pLeft = pExpr->pLeft; + pLeft = pExpr->pLeft; pRight = pExpr->pRight; - - if (pLeft->type != SQL_NODE_SQLFUNCTION) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + if (pRight->type != SQL_NODE_VALUE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6980,37 +6993,35 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, in return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - //if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { - // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - //} - if (pLeft->pParam) { size_t size = taosArrayGetSize(pLeft->pParam); for (int32_t i = 0; i < size; i++) { - tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i); - if (pParamElem->pNode->tokenId != TK_ALL && - pParamElem->pNode->tokenId != TK_ID && - pParamElem->pNode->tokenId != TK_STRING && - pParamElem->pNode->tokenId != TK_INTEGER && - pParamElem->pNode->tokenId != TK_FLOAT) { + tSqlExprItem* pParamItem = taosArrayGet(pLeft->pParam, i); + + tSqlExpr* pExpr1 = pParamItem->pNode; + if (pExpr1->tokenId != TK_ALL && + pExpr1->tokenId != TK_ID && + pExpr1->tokenId != TK_STRING && + pExpr1->tokenId != TK_INTEGER && + pExpr1->tokenId != TK_FLOAT) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - - if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) { + + if (pExpr1->tokenId == TK_ID && (pExpr1->colInfo.z == NULL && pExpr1->colInfo.n == 0)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pParamElem->pNode->tokenId == TK_ID) { + if (pExpr1->tokenId == TK_ID) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + if ((getColumnIndexByName(pCmd, &pExpr1->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - - if (index.columnIndex <= 0 || - index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } @@ -7022,12 +7033,11 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, in return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr); + return handleExprInHavingClause(pCmd, pQueryInfo, pSelectNodeList, pExpr, parentOptr); } - - -int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) { +int32_t validateHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, SArray* pSelectNodeList, + int32_t joinQuery, int32_t timeWindowQuery) { const char* msg1 = "having only works with group by"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "invalid expression in having clause"; @@ -7049,8 +7059,8 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, 
tSqlExpr* pExpr, SSqlCmd* pCmd } int32_t ret = 0; - - if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { + + if ((ret = getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { return ret; } @@ -7062,39 +7072,113 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd return TSDB_CODE_SUCCESS; } +static int32_t doLoadAllTableMeta(SSqlObj* pSql, int32_t index, SSqlNode* pSqlNode, int32_t numOfTables) { + const char* msg1 = "invalid table name"; + const char* msg2 = "invalid table alias name"; + const char* msg3 = "alias name too long"; + + int32_t code = TSDB_CODE_SUCCESS; + + SSqlCmd* pCmd = &pSql->cmd; + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, index); + + for (int32_t i = 0; i < numOfTables; ++i) { + if (pQueryInfo->numOfTables <= i) { // more than one table + tscAddEmptyMetaInfo(pQueryInfo); + } + STableNamePair *item = taosArrayGet(pSqlNode->from->list, i); + SStrToken *oriName = &item->name; + if (oriName->type == TK_INTEGER || oriName->type == TK_FLOAT) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + tscDequoteAndTrimToken(oriName); + if (tscValidateName(oriName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } -int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) { - assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0)); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); + code = tscSetTableFullName(pTableMetaInfo, oriName, pSql); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + SStrToken* aliasName = &item->aliasName; + if (TPARSER_HAS_TOKEN(*aliasName)) { + if (aliasName->type == TK_INTEGER || aliasName->type == TK_FLOAT) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + tscDequoteAndTrimToken(aliasName); + if (tscValidateName(aliasName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + if (aliasName->n >= TSDB_TABLE_NAME_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + strncpy(pTableMetaInfo->aliasName, aliasName->z, aliasName->n); + } else { + strncpy(pTableMetaInfo->aliasName, tNameGetTableName(&pTableMetaInfo->name), + tListLen(pTableMetaInfo->aliasName)); + } + + code = tscGetTableMeta(pSql, pTableMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + return TSDB_CODE_SUCCESS; +} + +static STableMeta* extractTempTableMetaFromNestQuery(SQueryInfo* pUpstream) { + int32_t numOfColumns = pUpstream->fieldsInfo.numOfOutput; + + STableMeta* meta = calloc(1, sizeof(STableMeta) + sizeof(SSchema) * numOfColumns); + meta->tableType = TSDB_TEMP_TABLE; + + STableComInfo *info = &meta->tableInfo; + info->numOfColumns = numOfColumns; + info->numOfTags = 0; + + int32_t n = 0; + for(int32_t i = 0; i < numOfColumns; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(&pUpstream->fieldsInfo, i); + if (pField->visible) { + meta->schema[n].bytes = pField->field.bytes; + meta->schema[n].type = pField->field.type; + meta->schema[n].colId = pField->pExpr->base.resColId; + tstrncpy(meta->schema[n].name, pField->pExpr->base.aliasName, TSDB_COL_NAME_LEN); + n += 1; + } + } + + return meta; +} + +int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, int32_t index) { + assert(pSqlNode != NULL && (pSqlNode->from == NULL || taosArrayGetSize(pSqlNode->from->list) > 0)); - const char* msg0 = "invalid 
table name"; const char* msg1 = "point interpolation query needs timestamp"; - const char* msg2 = "fill only available for interval query"; + const char* msg2 = "too many tables in from clause"; const char* msg3 = "start(end) time of query range required or time range too large"; - const char* msg5 = "too many columns in selection clause"; - const char* msg6 = "too many tables in from clause"; - const char* msg7 = "invalid table alias name"; - const char* msg8 = "alias name too long"; int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, index); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (pTableMetaInfo == NULL) { pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo); } assert(pCmd->clauseIndex == index); - // too many result columns not support order by in query - if (taosArrayGetSize(pQuerySqlNode->pSelectList) > TSDB_MAX_COLUMNS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); - } - /* * handle the sql expression without from subclause * select current_database(); @@ -7102,199 +7186,177 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i * select client_version(); * select server_state(); */ - if (pQuerySqlNode->from == NULL) { - assert(pQuerySqlNode->fillType == NULL && pQuerySqlNode->pGroupby == NULL && pQuerySqlNode->pWhere == NULL && - pQuerySqlNode->pSortOrder == NULL); - return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySqlNode); - } - - size_t fromSize = taosArrayGetSize(pQuerySqlNode->from->tableList); - if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + if (pSqlNode->from == NULL) { + assert(pSqlNode->fillType == NULL && pSqlNode->pGroupby == NULL && pSqlNode->pWhere == NULL && + pSqlNode->pSortOrder == NULL); + return doLocalQueryProcess(pCmd, pQueryInfo, pSqlNode); } - pQueryInfo->command = TSDB_SQL_SELECT; - - // set all query tables, which are maybe more than one. 
- for (int32_t i = 0; i < fromSize; ++i) { - STableNamePair* item = taosArrayGet(pQuerySqlNode->from->tableList, i); - SStrToken* pTableItem = &item->name; + if (pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) { + // parse the subquery in the first place + SArray* list = taosArrayGetP(pSqlNode->from->list, 0); + SSqlNode* p = taosArrayGetP(list, 0); - if (pTableItem->type != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); - } - - tscDequoteAndTrimToken(pTableItem); - - SStrToken tableName = {.z = pTableItem->z, .n = pTableItem->n, .type = TK_STRING}; - if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); - } - - if (pQueryInfo->numOfTables <= i) { // more than one table - tscAddEmptyMetaInfo(pQueryInfo); + code = validateSqlNode(pSql, p, 0); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + return code; } - STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i); - code = tscSetTableFullName(pTableMetaInfo1, pTableItem, pSql); if (code != TSDB_CODE_SUCCESS) { return code; } - SStrToken* aliasName = &item->aliasName; - if (TPARSER_HAS_TOKEN(*aliasName)) { - if (aliasName->type != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); - } + pQueryInfo = pCmd->pQueryInfo[0]; - tscDequoteAndTrimToken(aliasName); + SQueryInfo* current = calloc(1, sizeof(SQueryInfo)); - SStrToken aliasName1 = {.z = aliasName->z, .n = aliasName->n, .type = TK_STRING}; - if (tscValidateName(&aliasName1) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); - } + tscInitQueryInfo(current); + taosArrayPush(current->pUpstream, &pQueryInfo); - if (aliasName1.n >= TSDB_TABLE_NAME_LEN) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); - } + STableMeta* pTableMeta = extractTempTableMetaFromNestQuery(pQueryInfo); + STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); + pTableMetaInfo1->pTableMeta = pTableMeta; - strncpy(pTableMetaInfo1->aliasName, aliasName1.z, aliasName1.n); - } else { - strncpy(pTableMetaInfo1->aliasName, tNameGetTableName(&pTableMetaInfo1->name), tListLen(pTableMetaInfo1->aliasName)); + current->pTableMetaInfo = calloc(1, POINTER_BYTES); + current->pTableMetaInfo[0] = pTableMetaInfo1; + current->numOfTables = 1; + current->order = pQueryInfo->order; + + pCmd->pQueryInfo[0] = current; + pQueryInfo->pDownstream = current; + + if (validateSelectNodeList(pCmd, current, pSqlNode->pSelNodeList, false, false, false) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; } - code = tscGetTableMeta(pSql, pTableMetaInfo1); - if (code != TSDB_CODE_SUCCESS) { - return code; + } else { + pQueryInfo->command = TSDB_SQL_SELECT; + + size_t fromSize = taosArrayGetSize(pSqlNode->from->list); + if (fromSize > TSDB_MAX_JOIN_TABLE_NUM) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - } - assert(pQueryInfo->numOfTables == taosArrayGetSize(pQuerySqlNode->from->tableList)); - bool isSTable = false; - - if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - isSTable = true; - code = tscGetSTableVgroupInfo(pSql, index); + // set all query tables, which are maybe more than one. 
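    /*
     * doLoadAllTableMeta(), introduced earlier in this patch, walks every table in the FROM list:
     * it rejects numeric tokens as table names, dequotes and validates each name, applies the
     * optional alias (falling back to the table's own name when no alias is given), and then
     * fetches the table meta for each slot, adding empty meta-info slots on demand when more than
     * one table is referenced.
     */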
+ code = doLoadAllTableMeta(pSql, index, pSqlNode, (int32_t) fromSize); if (code != TSDB_CODE_SUCCESS) { return code; } - - TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_QUERY); - } else { - TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY); - } - // parse the group by clause in the first place - if (parseGroupbyClause(pQueryInfo, pQuerySqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } + bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); + if (isSTable) { + code = tscGetSTableVgroupInfo(pSql, index); // TODO refactor: getTablemeta along with vgroupInfo + if (code != TSDB_CODE_SUCCESS) { + return code; + } - // set where info - STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_QUERY); + } else { + TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY); + } - if (pQuerySqlNode->pWhere != NULL) { - if (parseWhereClause(pQueryInfo, &pQuerySqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + // parse the group by clause in the first place + if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - pQuerySqlNode->pWhere = NULL; - if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) { - pQueryInfo->window.skey = pQueryInfo->window.skey / 1000; - pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; - } - } else { // set the time rang - if (taosArrayGetSize(pQuerySqlNode->from->tableList) > 1) { // it is a join query, no where clause is not allowed. - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query "); - } - } + // set where info + STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - int32_t joinQuery = (pQuerySqlNode->from != NULL && taosArrayGetSize(pQuerySqlNode->from->tableList) > 1); - int32_t timeWindowQuery = - (TPARSER_HAS_TOKEN(pQuerySqlNode->interval.interval) || TPARSER_HAS_TOKEN(pQuerySqlNode->sessionVal.gap)); + if (pSqlNode->pWhere != NULL) { + if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } - if (parseSelectClause(pCmd, index, pQuerySqlNode->pSelectList, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } + pSqlNode->pWhere = NULL; + if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) { + pQueryInfo->window.skey = pQueryInfo->window.skey / 1000; + pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; + } + } else { // set the time rang + if (taosArrayGetSize(pSqlNode->from->list) > 1) { + // If it is a join query, no where clause is not allowed. 
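
Once the table metadata has been loaded, the WHERE clause is validated and the parsed time window is divided by 1000 when the table precision is TSDB_TIME_PRECISION_MILLI, which suggests the parser keeps the window in microseconds internally and scales it down for millisecond-precision tables. A hedged sketch of that normalization, with made-up type names:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the window is assumed to be parsed in microseconds and
 * scaled down for millisecond-precision tables, mirroring the
 * window.skey / window.ekey division in the patch. */
typedef struct TimeWindow {
  int64_t skey;  /* start of the query range */
  int64_t ekey;  /* end of the query range   */
} TimeWindow;

enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1 };

static void normalizeWindow(TimeWindow *w, int tablePrecision) {
  if (tablePrecision == PRECISION_MILLI) {
    w->skey /= 1000;   /* micro -> milli */
    w->ekey /= 1000;
  }
}

int main(void) {
  TimeWindow w = {.skey = 1600000000000000LL, .ekey = 1600000060000000LL};
  normalizeWindow(&w, PRECISION_MILLI);
  printf("skey=%lld ekey=%lld\n", (long long)w.skey, (long long)w.ekey);
  return 0;
}
```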
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query "); + } + } - // set order by info - if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } + int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); + int32_t timeWindowQuery = + (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)); - // set interval value - if (parseIntervalClause(pSql, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } else { - if (isTimeWindowQuery(pQueryInfo) && - (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { + if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, isSTable, joinQuery, timeWindowQuery) != + TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - } - // parse the having clause in the first place - if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } + // set order by info + if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != + TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } - /* - * transfer sql functions that need secondary merge into another format - * in dealing with super table queries such as: count/first/last - */ - if (isSTable) { - tscTansformFuncForSTableQuery(pQueryInfo); - - if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { + // set interval value + if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; + } else { + if (isTimeWindowQuery(pQueryInfo) && + (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { + return TSDB_CODE_TSC_INVALID_SQL; + } } - } - if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } + // parse the having clause in the first place + if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) != + TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } - // no result due to invalid query time range - if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { - pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; - return TSDB_CODE_SUCCESS; - } + /* + * transfer sql functions that need secondary merge into another format + * in dealing with super table queries such as: count/first/last + */ + if (isSTable) { + tscTansformFuncForSTableQuery(pQueryInfo); - if (!hasTimestampForPointInterpQuery(pQueryInfo)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } + if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { + return TSDB_CODE_TSC_INVALID_SQL; + } + } - // in case of join query, time range is required. 
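
The remaining validations hang off two flags, joinQuery (more than one table in the FROM clause) and timeWindowQuery (an INTERVAL or SESSION token is present), and a join without a WHERE clause is rejected immediately because the rows cannot be aligned by timestamp. The sketch below shows how such flags could be derived; ParsedQuery and classify are illustrative, not the parser's real data structures.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the parsed statement; the real code derives the
 * same two flags from SSqlNode (table count in FROM, interval/session tokens). */
typedef struct ParsedQuery {
  size_t numOfFromTables;   /* tables listed in the FROM clause */
  bool   hasIntervalToken;  /* INTERVAL(...) present            */
  bool   hasSessionGap;     /* SESSION(ts, gap) present         */
  bool   hasWhere;          /* WHERE clause present             */
} ParsedQuery;

static int classify(const ParsedQuery *q) {
  bool joinQuery       = (q->numOfFromTables > 1);
  bool timeWindowQuery = (q->hasIntervalToken || q->hasSessionGap);

  /* A join without a WHERE clause has no join condition, so it is rejected
   * up front rather than deep inside the validation chain. */
  if (joinQuery && !q->hasWhere) {
    fprintf(stderr, "condition missing for join query\n");
    return -1;
  }

  printf("joinQuery=%d timeWindowQuery=%d\n", joinQuery, timeWindowQuery);
  return 0;
}

int main(void) {
  ParsedQuery ok  = {.numOfFromTables = 2, .hasIntervalToken = true, .hasWhere = true};
  ParsedQuery bad = {.numOfFromTables = 2, .hasWhere = false};
  classify(&ok);
  classify(&bad);
  return 0;
}
```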
- if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { - int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); - if (timeRange == 0 && pQueryInfo->window.skey == 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; } - } - if ((code = parseLimitClause(pCmd, pQueryInfo, index, pQuerySqlNode, pSql)) != TSDB_CODE_SUCCESS) { - return code; - } + // no result due to invalid query time range + if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } - if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) { - return code; - } + if (!hasTimestampForPointInterpQuery(pQueryInfo)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } - updateLastScanOrderIfNeeded(pQueryInfo); - tscFieldInfoUpdateOffset(pQueryInfo); + // in case of join query, time range is required. + if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); + if (timeRange == 0 && pQueryInfo->window.skey == 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } - if (pQuerySqlNode->fillType != NULL) { - if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + if ((code = validateLimitNode(pCmd, pQueryInfo, index, pSqlNode, pSql)) != TSDB_CODE_SUCCESS) { + return code; } - /* - * fill options are set at the end position, when all columns are set properly - * the columns may be increased due to group by operation - */ - if ((code = checkQueryRangeForFill(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) { + if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) { return code; } - if ((code = parseFillClause(pCmd, pQueryInfo, pQuerySqlNode)) != TSDB_CODE_SUCCESS) { + updateLastScanOrderIfNeeded(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); + + if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) { return code; } } @@ -7302,7 +7364,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i return TSDB_CODE_SUCCESS; // Does not build query message here } -int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid) { +int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid) { tExprNode* pLeft = NULL; tExprNode* pRight= NULL; @@ -7316,6 +7378,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS if (pSqlExpr->pRight != NULL) { int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid); if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(pLeft, NULL); return ret; } } @@ -7325,7 +7388,9 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS return TSDB_CODE_SUCCESS; } - if (pSqlExpr->pLeft == NULL) { + if (pSqlExpr->pLeft == NULL) { // it is the leaf node + assert(pSqlExpr->pRight == NULL); + if (pSqlExpr->type == SQL_NODE_VALUE) { *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_VALUE; @@ -7344,15 +7409,15 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i 
= 0; i < size; ++i) { - SSqlExpr* p1 = taosArrayGetP(pQueryInfo->exprList, i); + SExprInfo* p1 = taosArrayGetP(pQueryInfo->exprList, i); - if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) { - (*pExpr)->pSchema->type = (uint8_t)p1->resType; - (*pExpr)->pSchema->bytes = p1->resBytes; - (*pExpr)->pSchema->colId = p1->resColId; + if (strcmp((*pExpr)->pSchema->name, p1->base.aliasName) == 0) { + (*pExpr)->pSchema->type = (uint8_t)p1->base.resType; + (*pExpr)->pSchema->bytes = p1->base.resBytes; + (*pExpr)->pSchema->colId = p1->base.resColId; if (uid != NULL) { - *uid = p1->uid; + *uid = p1->base.uid; } break; @@ -7431,7 +7496,7 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); for (int32_t i = 0; i < numOfCols; ++i) { SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i); - if (pCol->numOfFilters > 0) { + if (pCol->info.flist.numOfFilters > 0) { return true; } } @@ -7439,10 +7504,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { return false; } - - - - - - - diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8889e25177fe23668c67bbba363e0efa06a017d2..9528a553b24dc4eb6211bbef71a96013dc86b2a0 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -24,6 +24,7 @@ #include "tsclient.h" #include "ttimer.h" #include "tlockfree.h" +#include "qPlan.h" int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; @@ -147,7 +148,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { // Update the local cached epSet info cached by SqlObj int32_t inUse = pSql->epSet.inUse; tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); - tscDebug("%p update the epSet in SqlObj, in use before:%d, after:%d", pSql, inUse, pSql->epSet.inUse); + tscDebug("0x%"PRIx64" update the epSet in SqlObj, in use before:%d, after:%d", pSql->self, inUse, pSql->epSet.inUse); } @@ -221,7 +222,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { assert(online <= total); if (online < total) { - tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online); + tscError("0x%"PRIx64", HB, total dnode:%d, online dnode:%d", pSql->self, total, online); pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL; } @@ -245,11 +246,11 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (pObj->hbrid != 0) { int32_t waitingDuring = tsShellActivityTimer * 500; - tscDebug("%p send heartbeat in %dms", pSql, waitingDuring); + tscDebug("0x%"PRIx64" send heartbeat in %dms", pSql->self, waitingDuring); taosTmrReset(tscProcessActivityTimer, waitingDuring, (void *)pObj->rid, tscTmr, &pObj->pTimer); } else { - tscDebug("%p start to close tscObj:%p, not send heartbeat again", pSql, pObj); + tscDebug("0x%"PRIx64" start to close tscObj:%p, not send heartbeat again", pSql->self, pObj); } } @@ -269,11 +270,11 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { assert(pHB->self == pObj->hbrid); pHB->retry = 0; - int32_t code = tscProcessSql(pHB); + int32_t code = tscBuildAndSendRequest(pHB, NULL); taosReleaseRef(tscObjRef, pObj->hbrid); if (code != TSDB_CODE_SUCCESS) { - tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code)); + tscError("0x%"PRIx64" failed to sent HB to server, reason:%s", pHB->self, tstrerror(code)); } taosReleaseRef(tscRefId, rid); @@ -285,7 +286,7 @@ int tscSendMsgToServer(SSqlObj *pSql) { char *pMsg = rpcMallocCont(pCmd->payloadLen); if (NULL == pMsg) { - tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]); 
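
The tscServer.c hunks that follow are largely mechanical: every log statement that used to print the SSqlObj pointer with %p now prints the 64-bit object id pSql->self formatted as 0x%PRIx64, so a request can be traced across log lines even after the object is freed and its address reused. A minimal sketch of that formatting convention, where reqId stands in for pSql->self:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the logging convention the patch moves to: instead of a raw
 * object pointer ("%p"), messages carry a stable 64-bit request id rendered
 * as hex with the PRIx64 format macro. */
static void logRequest(uint64_t reqId, const char *msg) {
  printf("0x%" PRIx64 " %s\n", reqId, msg);
}

int main(void) {
  uint64_t reqId = 0x1a2b3c4d5e6fULL;  /* would normally come from a ref-counted registry */
  logRequest(reqId, "query rsp received");
  return 0;
}
```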
+ tscError("0x%"PRIx64" msg:%s malloc failed", pSql->self, taosMsg[pSql->cmd.msgType]); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -326,7 +327,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pSql->rpcRid = -1; if (pObj->signature != pObj) { - tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature); + tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature); taosRemoveRef(tscObjRef, handle); taosReleaseRef(tscObjRef, handle); @@ -334,10 +335,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { return; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) { - tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p", - pSql, pCmd->command, pQueryInfo->type, pObj, pObj->signature); + tscDebug("0x%"PRIx64" sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p", + pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature); taosRemoveRef(tscObjRef, handle); taosReleaseRef(tscObjRef, handle); @@ -369,11 +370,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { pSql->retry++; - tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry); + tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry); pSql->res.code = rpcMsg->code; // keep the previous error code if (pSql->retry > pSql->maxRetry) { - tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry); + tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry); } else { // wait for a little bit moment and then retry // todo do not sleep in rpc callback thread, add this process into queueu to process @@ -396,13 +397,13 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pRes->rspLen = 0; if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) { - tscDebug("%p query is cancelled, code:%s", pSql, tstrerror(pRes->code)); + tscDebug("0x%"PRIx64" query is cancelled, code:%s", pSql->self, tstrerror(pRes->code)); } else { pRes->code = rpcMsg->code; } if (pRes->code == TSDB_CODE_SUCCESS) { - tscDebug("%p reset retry counter to be 0 due to success rsp, old:%d", pSql, pSql->retry); + tscDebug("0x%"PRIx64" reset retry counter to be 0 due to success rsp, old:%d", pSql->self, pSql->retry); pSql->retry = 0; } @@ -437,10 +438,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pMsg->numOfFailedBlocks = htonl(pMsg->numOfFailedBlocks); pRes->numOfRows += pMsg->affectedRows; - tscDebug("%p SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql, sqlCmd[pCmd->command], + tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen); } else { - tscDebug("%p SQL cmd:%s, code:%s rspLen:%d", pSql, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen); + tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen); } } @@ -457,19 +458,16 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { (*pSql->fp)(pSql->param, pSql, rpcMsg->code); } - - if (shouldFree) { // in 
case of table-meta/vgrouplist query, automatically free it taosRemoveRef(tscObjRef, handle); - tscDebug("%p sqlObj is automatically freed", pSql); + tscDebug("0x%"PRIx64" sqlObj is automatically freed", pSql->self); } taosReleaseRef(tscObjRef, handle); - rpcFreeCont(rpcMsg->pCont); } -int doProcessSql(SSqlObj *pSql) { +int doBuildAndSendMsg(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -501,13 +499,16 @@ int doProcessSql(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } -int tscProcessSql(SSqlObj *pSql) { +int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo) { char name[TSDB_TABLE_FNAME_LEN] = {0}; SSqlCmd *pCmd = &pSql->cmd; uint32_t type = 0; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (pQueryInfo == NULL) { + pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); + } + STableMetaInfo *pTableMetaInfo = NULL; if (pQueryInfo != NULL) { @@ -522,7 +523,7 @@ int tscProcessSql(SSqlObj *pSql) { assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } - tscDebug("%p SQL cmd:%s will be processed, name:%s, type:%d", pSql, sqlCmd[pCmd->command], name, type); + tscDebug("0x%"PRIx64" SQL cmd:%s will be processed, name:%s, type:%d", pSql->self, sqlCmd[pCmd->command], name, type); if (pCmd->command < TSDB_SQL_MGMT) { // the pTableMetaInfo cannot be NULL if (pTableMetaInfo == NULL) { pSql->res.code = TSDB_CODE_TSC_APP_ERROR; @@ -532,15 +533,16 @@ int tscProcessSql(SSqlObj *pSql) { return (*tscProcessMsgRsp[pCmd->command])(pSql); } - return doProcessSql(pSql); + return doBuildAndSendMsg(pSql); } int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg *) pSql->cmd.payload; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); - pRetrieveMsg->free = htons(pQueryInfo->type); - pRetrieveMsg->qId = htobe64(pSql->res.qId); + SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(&pSql->cmd); + + pRetrieveMsg->free = htons(pQueryInfo->type); + pRetrieveMsg->qId = htobe64(pSql->res.qId); // todo valid the vgroupId at the client side STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -562,11 +564,12 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } pRetrieveMsg->header.vgId = htonl(vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, vgId, vgIndex, pSql->res.qId); + tscDebug("0x%"PRIx64" build fetch msg from vgId:%d, vgIndex:%d, qId:0x%" PRIx64, pSql->self, vgId, vgIndex, pSql->res.qId); } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId); - tscDebug("%p build fetch msg from only one vgroup, vgId:%d, qId:%" PRIu64, pSql, pTableMeta->vgId, pSql->res.qId); + tscDebug("0x%"PRIx64" build fetch msg from only one vgroup, vgId:%d, qId:0x%" PRIx64, pSql->self, pTableMeta->vgId, + pSql->res.qId); } pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg); @@ -578,7 +581,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; char* pMsg = pSql->cmd.payload; @@ -605,7 +608,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); 
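
tscProcessSql is renamed to tscBuildAndSendRequest and gains an optional SQueryInfo argument; callers such as the heartbeat path pass NULL, in which case the function falls back to the query info selected by the command's clause index. The sketch below shows that fallback pattern with hypothetical Command and QueryInfo types, not the real client structures.

```c
#include <stdio.h>

/* Hypothetical types; the real function is tscBuildAndSendRequest(), which
 * falls back to the command's current SQueryInfo when the caller passes NULL. */
typedef struct QueryInfo { const char *name; } QueryInfo;
typedef struct Command   { QueryInfo *active; } Command;

static int buildAndSend(Command *cmd, QueryInfo *explicitInfo) {
  QueryInfo *info = explicitInfo;
  if (info == NULL) {
    info = cmd->active;   /* default to the currently active query info */
  }
  printf("building request for query info '%s'\n", info->name);
  return 0;
}

int main(void) {
  QueryInfo mainInfo = {.name = "main"};
  QueryInfo subInfo  = {.name = "subquery"};
  Command cmd = {.active = &mainInfo};

  buildAndSend(&cmd, NULL);       /* heartbeat-style callers pass NULL         */
  buildAndSend(&cmd, &subInfo);   /* callers driving a specific stage pass one */
  return 0;
}
```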
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); - tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, pTableMeta->vgId, pSql->cmd.numOfTablesInSubmit, + tscDebug("0x%"PRIx64" build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql->self, pTableMeta->vgId, pSql->cmd.numOfTablesInSubmit, pSql->epSet.numOfEps); return TSDB_CODE_SUCCESS; } @@ -617,12 +620,12 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5; SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, clauseIndex); int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo)); size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2); + int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2); int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? pQueryInfo->tsBuf->fileSize : 0; int32_t sqlLen = (int32_t) strlen(pSql->sqlstr) + 1; @@ -646,8 +649,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } -static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg, int32_t *succeed) { - STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); +static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg, + int32_t *succeed) { TSKEY dfltKey = htobe64(pQueryMsg->window.skey); STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; @@ -663,7 +666,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; } else { - tscError("%p No vgroup info found", pSql); + tscError("0x%"PRIx64" No vgroup info found", pSql->self); *succeed = 0; return pMsg; @@ -671,7 +674,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char vgId = pVgroupInfo->vgId; tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo); - tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups); + tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, index, pTableMetaInfo->vgroupList->numOfVgroups); } else { vgId = pTableMeta->vgId; @@ -704,7 +707,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char int32_t numOfTables = (int32_t)taosArrayGetSize(pTableIdList->itemList); pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables - tscDebug("%p query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql, + tscDebug("0x%"PRIx64" query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql->self, pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups); // serialize each table id info @@ -722,322 +725,218 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char char n[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, n); - tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), n, pTableMeta->id.tid, pTableMeta->id.uid); + tscDebug("0x%"PRIx64" vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql->self, htonl(pQueryMsg->head.vgId), n, 
pTableMeta->id.tid, pTableMeta->id.uid); return pMsg; } -int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { - SSqlCmd *pCmd = &pSql->cmd; - - int32_t size = tscEstimateQueryMsgSize(pSql, pCmd->clauseIndex); - - if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { - tscError("%p failed to malloc for query msg", pSql); - return TSDB_CODE_TSC_INVALID_SQL; // todo add test for this - } - - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - - size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList); - if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) { - tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (uint64_t)numOfSrcCols, - tscGetNumOfColumns(pTableMeta)); +// TODO refactor +static int32_t serializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t numOfFilters, char** pMsg) { + // append the filter information after the basic column information + for (int32_t f = 0; f < numOfFilters; ++f) { + SColumnFilterInfo *pColFilter = &pColFilters[f]; - return TSDB_CODE_TSC_INVALID_SQL; - } - - if (pQueryInfo->interval.interval < 0) { - tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, pSql, (int64_t)pQueryInfo->interval.interval); - return TSDB_CODE_TSC_INVALID_SQL; - } - - if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) { - tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pQueryInfo->groupbyExpr.numOfGroupCols); - return TSDB_CODE_TSC_INVALID_SQL; - } - - SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; - tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); + SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)(*pMsg); + pFilterMsg->filterstr = htons(pColFilter->filterstr); - int32_t numOfTags = (int32_t)taosArrayGetSize(pTableMetaInfo->tagColList); - int32_t sqlLen = (int32_t) strlen(pSql->sqlstr); + (*pMsg) += sizeof(SColumnFilterInfo); - if (pQueryInfo->order.order == TSDB_ORDER_ASC) { - pQueryMsg->window.skey = htobe64(pQueryInfo->window.skey); - pQueryMsg->window.ekey = htobe64(pQueryInfo->window.ekey); - } else { - pQueryMsg->window.skey = htobe64(pQueryInfo->window.ekey); - pQueryMsg->window.ekey = htobe64(pQueryInfo->window.skey); - } - - pQueryMsg->order = htons(pQueryInfo->order.order); - pQueryMsg->orderColId = htons(pQueryInfo->order.orderColId); - pQueryMsg->fillType = htons(pQueryInfo->fillType); - pQueryMsg->limit = htobe64(pQueryInfo->limit.limit); - pQueryMsg->offset = htobe64(pQueryInfo->limit.offset); - pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList)); - pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval); - pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding); - pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset); - pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit; - pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit; - pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit; - pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); - pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); - pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len); - pQueryMsg->numOfTags = htonl(numOfTags); - pQueryMsg->queryType = htonl(pQueryInfo->type); - pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit); - 
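
The inline filter serialization is factored out into serializeColFilterInfo, which converts each fixed-width field to network byte order (htons/htobe64) as it appends it to the message buffer and advances the write cursor. The sketch below shows the same pattern on an illustrative WireFilter struct; hton64 is a portable stand-in for htobe64 and the layout is not the real SColumnFilterInfo.

```c
#include <arpa/inet.h>   /* htons, htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative wire struct: two 16-bit relation operators and two 64-bit
 * range bounds, all written in network byte order. */
typedef struct WireFilter {
  int16_t lowerRelOptr;
  int16_t upperRelOptr;
  int64_t lowerBnd;
  int64_t upperBnd;
} WireFilter;

/* Portable stand-in for htobe64: no-op on big-endian hosts, 32-bit swap
 * and recombine otherwise. */
static uint64_t hton64(uint64_t v) {
  if (htonl(1) == 1) {
    return v;
  }
  return ((uint64_t)htonl((uint32_t)v) << 32) | htonl((uint32_t)(v >> 32));
}

/* Serialize one filter into the message buffer and return the advanced cursor. */
static char *serializeFilter(const WireFilter *f, char *cursor) {
  WireFilter out;
  out.lowerRelOptr = (int16_t)htons((uint16_t)f->lowerRelOptr);
  out.upperRelOptr = (int16_t)htons((uint16_t)f->upperRelOptr);
  out.lowerBnd     = (int64_t)hton64((uint64_t)f->lowerBnd);
  out.upperBnd     = (int64_t)hton64((uint64_t)f->upperBnd);
  memcpy(cursor, &out, sizeof(out));
  return cursor + sizeof(out);
}

int main(void) {
  char buf[64];
  WireFilter f = {.lowerRelOptr = 1, .upperRelOptr = 2, .lowerBnd = 10, .upperBnd = 20};
  char *end = serializeFilter(&f, buf);
  printf("serialized %zu bytes\n", (size_t)(end - buf));
  return 0;
}
```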
pQueryMsg->sqlstrLen = htonl(sqlLen); - pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen); - pQueryMsg->sw.gap = htobe64(pQueryInfo->sessionWindow.gap); - pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX); - - size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo); - pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number - - // set column list ids - size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); - char *pMsg = (char *)(pQueryMsg->colList) + numOfCols * sizeof(SColumnInfo); - SSchema *pSchema = tscGetTableSchema(pTableMeta); - - for (int32_t i = 0; i < numOfCols; ++i) { - SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i); - SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex]; - - if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) { - char n[TSDB_TABLE_FNAME_LEN] = {0}; - tNameExtractFullName(&pTableMetaInfo->name, n); + if (pColFilter->filterstr) { + pFilterMsg->len = htobe64(pColFilter->len); + memcpy(*pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1)); + (*pMsg) += (pColFilter->len + 1); // append the additional filter binary info + } else { + pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi); + pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi); + } + pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr); + pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr); - tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s", - pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex, - pColSchema->name); + if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) { + tscError("invalid filter info"); return TSDB_CODE_TSC_INVALID_SQL; } + } - pQueryMsg->colList[i].colId = htons(pColSchema->colId); - pQueryMsg->colList[i].bytes = htons(pColSchema->bytes); - pQueryMsg->colList[i].type = htons(pColSchema->type); - pQueryMsg->colList[i].numOfFilters = htons(pCol->numOfFilters); - - // append the filter information after the basic column information - for (int32_t f = 0; f < pCol->numOfFilters; ++f) { - SColumnFilterInfo *pColFilter = &pCol->filterInfo[f]; - - SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pMsg; - pFilterMsg->filterstr = htons(pColFilter->filterstr); + return TSDB_CODE_SUCCESS; +} - pMsg += sizeof(SColumnFilterInfo); +static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, int64_t id, bool validateColumn) { + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - if (pColFilter->filterstr) { - pFilterMsg->len = htobe64(pColFilter->len); - memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1)); - pMsg += (pColFilter->len + 1); // append the additional filter binary info - } else { - pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi); - pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi); - } + // the queried table has been removed and a new table with the same name has already been created already + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("0x%"PRIx64" table has already been destroyed", id); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } - pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr); - pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr); + if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { + 
tscError("0x%"PRIx64" table schema is not matched with parsed sql", id); + return TSDB_CODE_TSC_INVALID_SQL; + } - if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) { - tscError("invalid filter info"); - return TSDB_CODE_TSC_INVALID_SQL; - } + assert(pExpr->resColId < 0); + SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg); + + SColIndex* pIndex = &pSqlExpr->colInfo; + + pIndex->colId = htons(pExpr->colInfo.colId); + pIndex->colIndex = htons(pExpr->colInfo.colIndex); + pIndex->flag = htons(pExpr->colInfo.flag); + pSqlExpr->uid = htobe64(pExpr->uid); + pSqlExpr->colType = htons(pExpr->colType); + pSqlExpr->colBytes = htons(pExpr->colBytes); + pSqlExpr->resType = htons(pExpr->resType); + pSqlExpr->resBytes = htons(pExpr->resBytes); + pSqlExpr->functionId = htons(pExpr->functionId); + pSqlExpr->numOfParams = htons(pExpr->numOfParams); + pSqlExpr->resColId = htons(pExpr->resColId); + pSqlExpr->flist.numOfFilters = htons(pExpr->flist.numOfFilters); + + (*pMsg) += sizeof(SSqlExpr); + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log + pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType); + pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen); + + if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { + memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen); + (*pMsg) += pExpr->param[j].nLen; + } else { + pSqlExpr->param[j].i64 = htobe64(pExpr->param[j].i64); } } - SSqlFuncMsg *pSqlFuncExpr = (SSqlFuncMsg *)pMsg; - for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + serializeColFilterInfo(pExpr->flist.filterInfo, pExpr->flist.numOfFilters, pMsg); - // the queried table has been removed and a new table with the same name has already been created already - // return error msg - if (pExpr->uid != pTableMeta->id.uid) { - tscError("%p table has already been destroyed", pSql); - return TSDB_CODE_TSC_INVALID_TABLE_NAME; - } - - if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { - tscError("%p table schema is not matched with parsed sql", pSql); - return TSDB_CODE_TSC_INVALID_SQL; - } + return TSDB_CODE_SUCCESS; +} - assert(pExpr->resColId < 0); +int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SSqlCmd *pCmd = &pSql->cmd; - pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId); - pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex); - pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag); + int32_t code = TSDB_CODE_SUCCESS; + int32_t size = tscEstimateQueryMsgSize(pSql, pCmd->clauseIndex); - if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { - pSqlFuncExpr->colType = htons(pExpr->resType); - pSqlFuncExpr->colBytes = htons(pExpr->resBytes); - } else if (pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { - SSchema *s = tGetTbnameColumnSchema(); + if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { + tscError("%p failed to malloc for query msg", pSql); + return TSDB_CODE_TSC_INVALID_SQL; // todo add test for this + } - pSqlFuncExpr->colType = htons(s->type); - pSqlFuncExpr->colBytes = htons(s->bytes); - } else if (pExpr->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { - SSchema s = tGetBlockDistColumnSchema(); + SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(pCmd); - pSqlFuncExpr->colType = htons(s.type); - pSqlFuncExpr->colBytes = htons(s.bytes); - } else { - SSchema* s = tscGetColumnSchemaById(pTableMeta, pExpr->colInfo.colId); - pSqlFuncExpr->colType = htons(s->type); - pSqlFuncExpr->colBytes = 
htons(s->bytes); - } + SQueryAttr query = {{0}}; + tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); - pSqlFuncExpr->functionId = htons(pExpr->functionId); - pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); - pSqlFuncExpr->resColId = htons(pExpr->resColId); - if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) { - pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters); - } else { - pSqlFuncExpr->filterNum = 0; - } + SArray* tableScanOperator = createTableScanPlan(&query); + SArray* queryOperator = createExecOperatorPlan(&query); - pMsg += sizeof(SSqlFuncMsg); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - if (pSqlFuncExpr->filterNum) { - pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters; + SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; + tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); - // append the filter information after the basic column information - for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) { - SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f]; + int32_t numOfTags = query.numOfTags; + int32_t sqlLen = (int32_t) strlen(pSql->sqlstr); - SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f]; - pFilterMsg->filterstr = htons(pColFilter->filterstr); + if (taosArrayGetSize(tableScanOperator) == 0) { + pQueryMsg->tableScanOperator = htonl(-1); + } else { + int32_t* tablescanOp = taosArrayGet(tableScanOperator, 0); + pQueryMsg->tableScanOperator = htonl(*tablescanOp); + } + + pQueryMsg->window.skey = htobe64(query.window.skey); + pQueryMsg->window.ekey = htobe64(query.window.ekey); + + pQueryMsg->order = htons(query.order.order); + pQueryMsg->orderColId = htons(query.order.orderColId); + pQueryMsg->fillType = htons(query.fillType); + pQueryMsg->limit = htobe64(query.limit.limit); + pQueryMsg->offset = htobe64(query.limit.offset); + pQueryMsg->numOfCols = htons(query.numOfCols); + + pQueryMsg->interval.interval = htobe64(query.interval.interval); + pQueryMsg->interval.sliding = htobe64(query.interval.sliding); + pQueryMsg->interval.offset = htobe64(query.interval.offset); + pQueryMsg->interval.intervalUnit = query.interval.intervalUnit; + pQueryMsg->interval.slidingUnit = query.interval.slidingUnit; + pQueryMsg->interval.offsetUnit = query.interval.offsetUnit; + + pQueryMsg->stableQuery = query.stableQuery; + pQueryMsg->topBotQuery = query.topBotQuery; + pQueryMsg->groupbyColumn = query.groupbyColumn; + pQueryMsg->hasTagResults = query.hasTagResults; + pQueryMsg->timeWindowInterpo = query.timeWindowInterpo; + pQueryMsg->queryBlockDist = query.queryBlockDist; + pQueryMsg->stabledev = query.stabledev; + pQueryMsg->tsCompQuery = query.tsCompQuery; + pQueryMsg->simpleAgg = query.simpleAgg; + pQueryMsg->pointInterpQuery = query.pointInterpQuery; + pQueryMsg->needReverseScan = query.needReverseScan; + + pQueryMsg->numOfTags = htonl(numOfTags); + pQueryMsg->sqlstrLen = htonl(sqlLen); + pQueryMsg->sw.gap = htobe64(query.sw.gap); + pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX); + + pQueryMsg->secondStageOutput = htonl(query.numOfExpr2); + pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number - if (pColFilter->filterstr) { - pFilterMsg->len = htobe64(pColFilter->len); - memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1)); - pMsg += (pColFilter->len + 1); // append the additional filter 
binary info - } else { - pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi); - pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi); - } + pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); + pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); + pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len); + pQueryMsg->queryType = htonl(pQueryInfo->type); + pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen); - pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr); - pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr); + // set column list ids + size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); + char *pMsg = (char *)(pQueryMsg->tableCols) + numOfCols * sizeof(SColumnInfo); - if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) { - tscError("invalid filter info"); - return TSDB_CODE_TSC_INVALID_SQL; - } - } - } + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfo *pCol = &query.tableCols[i]; + pQueryMsg->tableCols[i].colId = htons(pCol->colId); + pQueryMsg->tableCols[i].bytes = htons(pCol->bytes); + pQueryMsg->tableCols[i].type = htons(pCol->type); + pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters); - for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log - pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); - pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen); + // append the filter information after the basic column information + serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg); + } - if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { - memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen); - pMsg += pExpr->param[j].nLen; - } else { - pSqlFuncExpr->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64); - } + for (int32_t i = 0; i < query.numOfOutput; ++i) { + code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql->self, true); + if (code != TSDB_CODE_SUCCESS) { + goto _end; } - - pSqlFuncExpr = (SSqlFuncMsg *)pMsg; } - size_t output = tscNumOfFields(pQueryInfo); - - if (tscIsSecondStageQuery(pQueryInfo)) { - pQueryMsg->secondStageOutput = htonl((int32_t) output); - - SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; - - for (int32_t i = 0; i < output; ++i) { - SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); - SSqlExpr *pExpr = pField->pSqlExpr; - - // this should be switched to projection query - if (pExpr != NULL) { - // the queried table has been removed and a new table with the same name has already been created already - // return error msg - if (pExpr->uid != pTableMeta->id.uid) { - tscError("%p table has already been destroyed", pSql); - return TSDB_CODE_TSC_INVALID_TABLE_NAME; - } - - if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { - tscError("%p table schema is not matched with parsed sql", pSql); - return TSDB_CODE_TSC_INVALID_SQL; - } - - pSqlFuncExpr1->numOfParams = 0; // no params for projection query - pSqlFuncExpr1->functionId = htons(TSDB_FUNC_PRJ); - pSqlFuncExpr1->colInfo.colId = htons(pExpr->resColId); - pSqlFuncExpr1->colInfo.flag = htons(TSDB_COL_NORMAL); - - bool assign = false; - for (int32_t f = 0; f < tscSqlExprNumOfExprs(pQueryInfo); ++f) { - SSqlExpr *pe = tscSqlExprGet(pQueryInfo, f); - if (pe == pExpr) { - pSqlFuncExpr1->colInfo.colIndex = htons(f); - pSqlFuncExpr1->colType = htons(pe->resType); - pSqlFuncExpr1->colBytes = 
htons(pe->resBytes); - assign = true; - break; - } - } - - assert(assign); - pMsg += sizeof(SSqlFuncMsg); - pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; - } else { - assert(pField->pArithExprInfo != NULL); - SExprInfo* pExprInfo = pField->pArithExprInfo; - - pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId); - pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId); - pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams); - pMsg += sizeof(SSqlFuncMsg); - - for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { - // todo add log - pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType); - pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes); - - if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) { - memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes); - pMsg += pExprInfo->base.arg[j].argBytes; - } else { - pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64); - } - } - - pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; - } + for (int32_t i = 0; i < query.numOfExpr2; ++i) { + code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql->self, false); + if (code != TSDB_CODE_SUCCESS) { + goto _end; } - } else { - pQueryMsg->secondStageOutput = 0; } int32_t succeed = 1; - + // serialize the table info (sid, uid, tags) - pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg, &succeed); + pMsg = doSerializeTableInfo(pQueryMsg, pSql, pTableMetaInfo, pMsg, &succeed); if (succeed == 0) { - return TSDB_CODE_TSC_APP_ERROR; + code = TSDB_CODE_TSC_APP_ERROR; + goto _end; } - SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr; + SSqlGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr; if (pGroupbyExpr->numOfGroupCols > 0) { pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex); pQueryMsg->orderType = htons(pGroupbyExpr->orderType); for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) { SColIndex* pCol = taosArrayGet(pGroupbyExpr->columnInfo, j); - + *((int16_t *)pMsg) = htons(pCol->colId); pMsg += sizeof(pCol->colId); @@ -1046,48 +945,29 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { *((int16_t *)pMsg) += htons(pCol->flag); pMsg += sizeof(pCol->flag); - + memcpy(pMsg, pCol->name, tListLen(pCol->name)); pMsg += tListLen(pCol->name); } } - if (pQueryInfo->fillType != TSDB_FILL_NONE) { - for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { - *((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]); - pMsg += sizeof(pQueryInfo->fillVal[0]); + if (query.fillType != TSDB_FILL_NONE) { + for (int32_t i = 0; i < query.numOfOutput; ++i) { + *((int64_t *)pMsg) = htobe64(query.fillVal[i]); + pMsg += sizeof(query.fillVal[0]); } } - - if (numOfTags != 0) { - int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); - int32_t numOfTagColumns = tscGetNumOfTags(pTableMeta); - int32_t total = numOfTagColumns + numOfColumns; - - pSchema = tscGetTableTagSchema(pTableMeta); - - for (int32_t i = 0; i < numOfTags; ++i) { - SColumn *pCol = taosArrayGetP(pTableMetaInfo->tagColList, i); - SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex]; - - if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) || - (!isValidDataType(pColSchema->type))) { - char n[TSDB_TABLE_FNAME_LEN] = {0}; - tNameExtractFullName(&pTableMetaInfo->name, n); - tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s", - pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, 
pCol->colIndex.columnIndex, pColSchema->name); + if (query.numOfTags > 0) { + for (int32_t i = 0; i < query.numOfTags; ++i) { + SColumnInfo* pTag = &query.tagColList[i]; - return TSDB_CODE_TSC_INVALID_SQL; - } - SColumnInfo* pTagCol = (SColumnInfo*) pMsg; - - pTagCol->colId = htons(pColSchema->colId); - pTagCol->bytes = htons(pColSchema->bytes); - pTagCol->type = htons(pColSchema->type); - pTagCol->numOfFilters = 0; - + pTagCol->colId = htons(pTag->colId); + pTagCol->bytes = htons(pTag->bytes); + pTagCol->type = htons(pTag->type); + pTagCol->flist.numOfFilters = 0; + pMsg += sizeof(SColumnInfo); } } @@ -1095,12 +975,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // serialize tag column query condition if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) { STagCond* pTagCond = &pQueryInfo->tagCond; - + SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid); if (pCond != NULL && pCond->cond != NULL) { pQueryMsg->tagCondLen = htons(pCond->len); memcpy(pMsg, pCond->cond, pCond->len); - + pMsg += pCond->len; } } @@ -1117,21 +997,30 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } // compressed ts block - pQueryMsg->tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); + pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); if (pQueryInfo->tsBuf != NULL) { // note: here used the index instead of actual vnode id. int32_t vnodeIndex = pTableMetaInfo->vgroupIndex; - int32_t code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsLen, &pQueryMsg->tsNumOfBlocks); + code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } - pMsg += pQueryMsg->tsLen; + pMsg += pQueryMsg->tsBuf.tsLen; + + pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); + pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); + pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); + } + + int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator); + pQueryMsg->numOfOperator = htonl(numOfOperator); + for(int32_t i = 0; i < numOfOperator; ++i) { + int32_t *operator = taosArrayGet(queryOperator, i); + *(int32_t*)pMsg = htonl(*operator); - pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); - pQueryMsg->tsLen = htonl(pQueryMsg->tsLen); - pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks); + pMsg += sizeof(int32_t); } memcpy(pMsg, pSql->sqlstr, sqlLen); @@ -1139,14 +1028,18 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t msgLen = (int32_t)(pMsg - pCmd->payload); - tscDebug("%p msg built success, len:%d bytes", pSql, msgLen); + tscDebug("0x%"PRIx64" msg built success, len:%d bytes", pSql->self, msgLen); pCmd->payloadLen = msgLen; pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; - + pQueryMsg->head.contLen = htonl(msgLen); assert(msgLen + minMsgSize() <= (int32_t)pCmd->allocSize); - return TSDB_CODE_SUCCESS; + _end: + freeQueryAttr(&query); + taosArrayDestroy(tableScanOperator); + taosArrayDestroy(queryOperator); + return code; } int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { @@ -1169,7 +1062,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; pCmd->payloadLen = sizeof(SCreateDnodeMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return 
TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1187,7 +1080,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; pCmd->payloadLen = sizeof(SCreateAcctMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1233,7 +1126,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SCreateUserMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1272,7 +1165,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SDropDbMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1294,7 +1187,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SCMDropTableMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1315,7 +1208,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SDropDnodeMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1336,7 +1229,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? 
TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1351,7 +1244,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SUseDbMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1368,7 +1261,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SSyncDbMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1387,7 +1280,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SShowMsg) + 100; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1467,13 +1360,13 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSchema *pSchema; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); // Reallocate the payload size size = tscEstimateCreateTableMsgLength(pSql, pInfo); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { - tscError("%p failed to malloc for create table msg", pSql); + tscError("0x%"PRIx64" failed to malloc for create table msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1536,7 +1429,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg = (char *)pSchema; if (type == TSQL_CREATE_STREAM) { // check if it is a stream sql - SQuerySqlNode *pQuerySql = pInfo->pCreateTableInfo->pSelect; + SSqlNode *pQuerySql = pInfo->pCreateTableInfo->pSelect; strncpy(pMsg, pQuerySql->sqlstr.z, pQuerySql->sqlstr.n + 1); pCreateMsg->sqlLen = htons(pQuerySql->sqlstr.n + 1); @@ -1556,7 +1449,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); return minMsgSize() + sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE; } @@ -1565,14 +1458,14 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int msgLen = 0; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo; int size = tscEstimateAlterTableMsgLength(pCmd); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { - tscError("%p failed to malloc for alter table msg", pSql); + tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1614,7 +1507,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload; 
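
Most of the surrounding tscBuildXXXMsg hunks follow a single pattern: set the payload length to the fixed message size, grow the command buffer with tscAllocPayload, and return TSDB_CODE_TSC_OUT_OF_MEMORY (now logged with the hex request id) if the allocation fails. A hedged sketch of that estimate-then-allocate flow with illustrative types only:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative command buffer; the real code keeps payload/allocSize/payloadLen
 * inside SSqlCmd and grows it with tscAllocPayload(). */
typedef struct Command {
  char  *payload;
  size_t allocSize;
  size_t payloadLen;
} Command;

/* Grow the payload buffer to at least `size` bytes, or fail loudly. */
static int ensurePayload(Command *cmd, size_t size) {
  if (size > cmd->allocSize) {
    char *p = realloc(cmd->payload, size);
    if (p == NULL) {
      fprintf(stderr, "failed to malloc for query msg\n");  /* mirrors the OUT_OF_MEMORY path */
      return -1;
    }
    cmd->payload = p;
    cmd->allocSize = size;
  }
  return 0;
}

/* Build a fixed-size message only after the buffer is known to be big enough. */
static int buildUseDbMsg(Command *cmd, const char *db) {
  cmd->payloadLen = 64;   /* stand-in for the fixed message size */
  if (ensurePayload(cmd, cmd->payloadLen) != 0) {
    return -1;
  }
  memset(cmd->payload, 0, cmd->payloadLen);
  strncpy(cmd->payload, db, cmd->payloadLen - 1);
  return 0;
}

int main(void) {
  Command cmd = {0};
  if (buildUseDbMsg(&cmd, "testdb") == 0) {
    printf("payload ready, %zu bytes\n", cmd.payloadLen);
  }
  free(cmd.payload);
  return 0;
}
```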
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen); - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMeta *pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; SNewVgroupInfo vgroupInfo = {.vgId = -1}; @@ -1646,11 +1539,11 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SRetrieveTableMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg*)pCmd->payload; pRetrieveMsg->qId = htobe64(pSql->res.qId); pRetrieveMsg->free = htons(pQueryInfo->type); @@ -1674,7 +1567,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { pRes->row = 0; pRes->rspType = 1; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { return pRes->code; } @@ -1722,16 +1615,32 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) { return code; } - pRes->code = tscDoLocalMerge(pSql); + // global aggregation may be the upstream for parent query + SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(pCmd); + if (pQueryInfo->pQInfo == NULL) { + STableGroupInfo tableGroupInfo = {.numOfTables = 1, .pGroupList = taosArrayInit(1, POINTER_BYTES),}; + tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - tscCreateResPointerInfo(pRes, pQueryInfo); - tscSetResRawPtr(pRes, pQueryInfo); + STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN}; + + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); + taosArrayPush(group, &tableKeyInfo); + taosArrayPush(tableGroupInfo.pGroupList, &group); + + SExprInfo* list = calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SExprInfo)); + for(int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExprInfo = tscSqlExprGet(pQueryInfo, i); + list[i] = *pExprInfo; + } + + pQueryInfo->pQInfo = createQueryInfoFromQueryNode(pQueryInfo, list, &tableGroupInfo, NULL, NULL, pRes->pLocalMerger, MERGE_STAGE); } - pRes->row = 0; - pRes->completed = (pRes->numOfRows == 0); + uint64_t localQueryId = 0; + qTableQuery(pQueryInfo->pQInfo, &localQueryId); + convertQueryResult(pRes, pQueryInfo); + + handleDownstreamOperator(pRes, pQueryInfo); code = pRes->code; if (pRes->code == TSDB_CODE_SUCCESS) { @@ -1752,7 +1661,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pCmd->payloadLen = sizeof(SConnectMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { - tscError("%p failed to malloc for query msg", pSql); + tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1779,7 +1688,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = 
tscGetMetaInfo(pQueryInfo, 0); STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload; @@ -1837,7 +1746,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize); - tscDebug("%p build load multi-metermeta msg completed, numOfTables:%d, msg size:%d", pSql, pCmd->count, + tscDebug("0x%"PRIx64" build load multi-metermeta msg completed, numOfTables:%d, msg size:%d", pSql->self, pCmd->count, pCmd->payloadLen); return pCmd->payloadLen; @@ -1849,7 +1758,7 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; char* pMsg = pCmd->payload; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); SSTableVgroupMsg *pStableVgroupMsg = (SSTableVgroupMsg *)pMsg; pStableVgroupMsg->numOfTables = htonl(pQueryInfo->numOfTables); @@ -1892,7 +1801,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { pthread_mutex_unlock(&pObj->mutex); - tscError("%p failed to create heartbeat msg", pSql); + tscError("0x%"PRIx64" failed to create heartbeat msg", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1968,7 +1877,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg); if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) { - tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name)); + tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name)); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -2017,7 +1926,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { } } - tscDebug("%p recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, + tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name)); free(pTableMeta); @@ -2124,7 +2033,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) { pSql->res.code = TSDB_CODE_SUCCESS; pSql->res.numOfTotal = i; - tscDebug("%p load multi-metermeta resp from complete num:%d", pSql, pSql->res.numOfTotal); + tscDebug("0x%"PRIx64" load multi-metermeta resp from complete num:%d", pSql->self, pSql->res.numOfTotal); #endif return TSDB_CODE_SUCCESS; @@ -2161,8 +2070,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; if (pInfo->vgroupList->numOfVgroups <= 0) { - //tfree(pInfo->vgroupList); - tscError("%p empty vgroup info", pSql); + tscDebug("0x%"PRIx64" empty vgroup info, no corresponding tables for stable", pSql->self); } else { for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { // just init, no need to lock @@ -2212,7 +2120,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -2244,15 +2152,16 @@ int tscProcessShowRsp(SSqlObj *pSql) { SColumnIndex index = {0}; pSchema = pMetaMsg->schema; - + + uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) { 
index.columnIndex = i; - tscColumnListInsert(pQueryInfo->colList, &index); + tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema); TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes); SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, + pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false); } @@ -2271,7 +2180,7 @@ static void createHbObj(STscObj* pObj) { pSql->fp = tscProcessHeartBeatRsp; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoS(&pSql->cmd, 0); if (pQueryInfo == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; tfree(pSql); @@ -2292,7 +2201,7 @@ static void createHbObj(STscObj* pObj) { pSql->signature = pSql; registerSqlObj(pSql); - tscDebug("%p HB is allocated, pObj:%p", pSql, pObj); + tscDebug("0x%"PRIx64" HB is allocated, pObj:%p", pSql->self, pObj); pObj->hbrid = pSql->self; } @@ -2318,7 +2227,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) { tscUpdateMgmtEpSet(pSql, &pConnect->epSet); for (int i = 0; i < pConnect->epSet.numOfEps; ++i) { - tscDebug("%p epSet.fqdn[%d]: %s, pObj:%p", pSql, i, pConnect->epSet.fqdn[i], pObj); + tscDebug("0x%"PRIx64" epSet.fqdn[%d]: %s, pObj:%p", pSql->self, i, pConnect->epSet.fqdn[i], pObj); } } @@ -2362,9 +2271,9 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { tNameExtractFullName(&pTableMetaInfo->name, name); taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); - tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, name, (int32_t) taosHashGetSize(tscTableMetaInfo)); + tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaInfo)); - pTableMetaInfo->pTableMeta = NULL; + tfree(pTableMetaInfo->pTableMeta); return 0; } @@ -2374,7 +2283,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); - tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name); + tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name); bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); @@ -2399,13 +2308,14 @@ int tscProcessShowCreateRsp(SSqlObj *pSql) { int tscProcessQueryRsp(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; - SQueryTableRsp *pQuery = (SQueryTableRsp *)pRes->pRsp; - pQuery->qId = htobe64(pQuery->qId); - pRes->qId = pQuery->qId; + SQueryTableRsp *pQueryAttr = (SQueryTableRsp *)pRes->pRsp; + pQueryAttr->qId = htobe64(pQueryAttr->qId); + pRes->qId = pQueryAttr->qId; pRes->data = NULL; + tscResetForNextRetrieve(pRes); - tscDebug("%p query rsp received, qId:%"PRIu64, pSql, pRes->qId); + tscDebug("0x%"PRIx64" query rsp received, qId:0x%"PRIx64, pSql->self, pRes->qId); return 0; } @@ -2428,7 +2338,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { pRes->completed = (pRetrieve->completed == 1); pRes->data = pRetrieve->data; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetActiveQueryInfo(pCmd); if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { return pRes->code; } @@ -2442,6 +2352,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { 
tscSetResRawPtr(pRes, pQueryInfo); } + handleDownstreamOperator(pRes, pQueryInfo); + if (pSql->pSubscription != NULL) { int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput; @@ -2463,7 +2375,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { } pRes->row = 0; - tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d, qId:%"PRIu64, pSql, pRes->numOfRows, pRes->offset, + tscDebug("0x%"PRIx64" numOfRows:%d, offset:%" PRId64 ", complete:%d, qId:0x%"PRIx64, pSql->self, pRes->numOfRows, pRes->offset, pRes->completed, pRes->qId); return 0; @@ -2474,7 +2386,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code); static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { SSqlObj *pNew = calloc(1, sizeof(SSqlObj)); if (NULL == pNew) { - tscError("%p malloc failed for new sqlobj to get table meta", pSql); + tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2482,13 +2394,13 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn pNew->signature = pNew; pNew->cmd.command = TSDB_SQL_META; - tscAddSubqueryInfo(&pNew->cmd); + tscAddQueryInfo(&pNew->cmd); - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd, 0); pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) { - tscError("%p malloc failed for payload to get table meta", pSql); + tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self); tscFreeSqlObj(pNew); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2501,24 +2413,24 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn if (pSql->cmd.autoCreated) { int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData); if (code != TSDB_CODE_SUCCESS) { - tscError("%p malloc failed for new tag data to get table meta", pSql); + tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self); tscFreeSqlObj(pNew); return TSDB_CODE_TSC_OUT_OF_MEMORY; } } - tscDebug("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated); - registerSqlObj(pNew); + tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get tableMeta, auto create:%d", pSql->self, pNew->self, + pNew->cmd.autoCreated); pNew->fp = tscTableMetaCallBack; pNew->param = (void *)pSql->self; - tscDebug("%p metaRid from %" PRId64 " to %" PRId64 , pSql, pSql->metaRid, pNew->self); + tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->metaRid, pNew->self); pSql->metaRid = pNew->self; - int32_t code = tscProcessSql(pNew); + int32_t code = tscBuildAndSendRequest(pNew, NULL); if (code == TSDB_CODE_SUCCESS) { code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated } @@ -2573,19 +2485,19 @@ int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool create int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); char name[TSDB_TABLE_FNAME_LEN] = {0}; int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name); if (code != TSDB_CODE_SUCCESS) { - tscError("%p failed to generate the table full name", pSql); + 
tscError("0x%"PRIx64" failed to generate the table full name", pSql->self); return TSDB_CODE_TSC_INVALID_SQL; } STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; if (pTableMeta) { - tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name, + tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql->self, name, tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid); } @@ -2597,7 +2509,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { } static bool allVgroupInfoRetrieved(SSqlCmd* pCmd, int32_t clauseIndex) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, clauseIndex); for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); if (pTableMetaInfo->vgroupList == NULL) { @@ -2624,13 +2536,13 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) { pNew->cmd.command = TSDB_SQL_STABLEVGROUP; // TODO TEST IT - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd, 0); if (pNewQueryInfo == NULL) { tscFreeSqlObj(pNew); return code; } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, clauseIndex); for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i); STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta); @@ -2645,16 +2557,16 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) { pNewQueryInfo->numOfTables = pQueryInfo->numOfTables; registerSqlObj(pNew); - tscDebug("%p svgroupRid from %" PRId64 " to %" PRId64 , pSql, pSql->svgroupRid, pNew->self); + tscDebug("0x%"PRIx64" svgroupRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->svgroupRid, pNew->self); pSql->svgroupRid = pNew->self; - tscDebug("%p new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql, pNew, pNewQueryInfo->numOfTables); + tscDebug("0x%"PRIx64" new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql->self, pNew, pNewQueryInfo->numOfTables); pNew->fp = tscTableMetaCallBack; pNew->param = (void *)pSql->self; - code = tscProcessSql(pNew); + code = tscBuildAndSendRequest(pNew, NULL); if (code == TSDB_CODE_SUCCESS) { code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; } @@ -2730,7 +2642,7 @@ void tscInitMsgsFp() { tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp; tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp; - + tscKeepConn[TSDB_SQL_SHOW] = 1; tscKeepConn[TSDB_SQL_RETRIEVE] = 1; tscKeepConn[TSDB_SQL_SELECT] = 1; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 13c8f025eaf7b3fb0cdf5e0a7fbc6328e8bd12e6..8dbb1c0a52b67d6067fe379423db5dc9b0610d00 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -191,7 +191,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass, pSql->fp = syncConnCallback; pSql->param = pSql; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); tsem_wait(&pSql->rspSem); if (pSql->res.code != TSDB_CODE_SUCCESS) { @@ -265,7 +265,7 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, if (taos) *taos = pObj; pSql->fetchFp = fp; - pSql->res.code = tscProcessSql(pSql); + pSql->res.code = tscBuildAndSendRequest(pSql, NULL); tscDebug("%p DB async connection is opening", taos); return pObj; } @@ -292,7 
+292,7 @@ void taos_close(TAOS *taos) { pHb->rpcRid = -1; } - tscDebug("%p HB is freed", pHb); + tscDebug("0x%"PRIx64" HB is freed", pHb->self); taosReleaseRef(tscObjRef, pHb->self); #ifdef __APPLE__ // to satisfy later tsem_destroy in taos_free_result @@ -373,7 +373,7 @@ int taos_num_fields(TAOS_RES *res) { if (pSql == NULL || pSql->signature != pSql) return 0; int32_t num = 0; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (pQueryInfo == NULL) { return num; } @@ -408,7 +408,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SSqlRes *pRes = &pSql->res; if (pSql == NULL || pSql->signature != pSql) return 0; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (pQueryInfo == NULL) { return NULL; } @@ -559,7 +559,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { return true; } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) { return true; @@ -576,9 +576,9 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { cmd == TSDB_SQL_FETCH)) { pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]); + tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]); - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return false; } @@ -588,13 +588,13 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { void taos_free_result(TAOS_RES *res) { SSqlObj* pSql = (SSqlObj*) res; if (pSql == NULL || pSql->signature != pSql) { - tscError("%p already released sqlObj", res); + tscError("0x%"PRIx64" already released sqlObj", pSql ? 
pSql->self : -1); return; } bool freeNow = tscKillQueryInDnode(pSql); if (freeNow) { - tscDebug("%p free sqlObj in cache", pSql); + tscDebug("0x%"PRIx64" free sqlObj in cache", pSql->self); taosReleaseRef(tscObjRef, pSql->self); } } @@ -672,7 +672,7 @@ char *taos_get_client_info() { return version; } static void tscKillSTableQuery(SSqlObj *pSql) { SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) { return; @@ -708,7 +708,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) { tscUnlockByThread(&pSql->squeryLock); - tscDebug("%p super table query cancelled", pSql); + tscDebug("0x%"PRIx64" super table query cancelled", pSql->self); } void taos_stop_query(TAOS_RES *res) { @@ -717,13 +717,13 @@ void taos_stop_query(TAOS_RES *res) { return; } - tscDebug("%p start to cancel query", res); + tscDebug("0x%"PRIx64" start to cancel query", pSql->self); SSqlCmd *pCmd = &pSql->cmd; // set the error code for master pSqlObj firstly pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { assert(pSql->rpcRid <= 0); @@ -744,7 +744,7 @@ void taos_stop_query(TAOS_RES *res) { } } - tscDebug("%p query is cancelled", res); + tscDebug("0x%"PRIx64" query is cancelled", pSql->self); } bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { @@ -753,7 +753,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { return true; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (pQueryInfo == NULL) { return true; } @@ -877,19 +877,18 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pRes->numOfClauseTotal = 0; - tscDebug("%p Valid SQL: %s pObj:%p", pSql, sql, pObj); + tscDebug("0x%"PRIx64" Valid SQL: %s pObj:%p", pSql->self, sql, pObj); int32_t sqlLen = (int32_t)strlen(sql); if (sqlLen > tsMaxSQLStringLen) { - tscError("%p sql too long", pSql); + tscError("0x%"PRIx64" sql too long", pSql->self); tfree(pSql); return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); if (pSql->sqlstr == NULL) { - tscError("%p failed to malloc sql string buffer", pSql); - tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); + tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tfree(pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -914,7 +913,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { } if (code != TSDB_CODE_SUCCESS) { - tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(pSql), pObj); + tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj); } taos_free_result(pSql); @@ -933,7 +932,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; char *str = (char *)tblNameList; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd, pCmd->clauseIndex); if (pQueryInfo == NULL) { pSql->res.code = terrno; return terrno; @@ -1027,18 +1026,18 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { pRes->numOfClauseTotal = 0; assert(pSql->fp == NULL); - tscDebug("%p tableNameList: %s 
pObj:%p", pSql, tableNameList, pObj); + tscDebug("0x%"PRIx64" tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj); int32_t tblListLen = (int32_t)strlen(tableNameList); if (tblListLen > MAX_TABLE_NAME_LENGTH) { - tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH); + tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH); tscFreeSqlObj(pSql); return TSDB_CODE_TSC_INVALID_SQL; } char *str = calloc(1, tblListLen + 1); if (str == NULL) { - tscError("%p failed to malloc sql string buffer", pSql); + tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tscFreeSqlObj(pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1048,7 +1047,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. - * If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql() + * If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscBuildAndSendRequest() * to free connection, which may cause segment fault, when the parse phrase is not even successfully executed. */ pRes->qId = 0; @@ -1061,7 +1060,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { tscDoQuery(pSql); - tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); + tscDebug("0x%"PRIx64" load multi-table meta result:%d %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj); if ((code = pRes->code) != TSDB_CODE_SUCCESS) { tscFreeSqlObj(pSql); } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 7699e6f45973c8ca834e0e93ad5f5f31797ec520..17bf575b60eed9f71e72a608d0aef4944c887bef 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -37,8 +37,8 @@ static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t l static bool isProjectStream(SQueryInfo* pQueryInfo) { for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId != TSDB_FUNC_PRJ) { + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId != TSDB_FUNC_PRJ) { return false; } } @@ -70,7 +70,7 @@ static void setRetryInfo(SSqlStream* pStream, int32_t code) { pSql->res.code = code; int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); - tscDebug("%p stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql, pStream, retryDelayTime); + tscDebug("0x%"PRIx64" stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql->self, pStream, retryDelayTime); tscSetRetryTimer(pStream, pSql, retryDelayTime); } @@ -89,7 +89,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) { return; } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); @@ -101,14 +101,22 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) { return; } + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) { + tscDebug("0x%"PRIx64" empty vgroup 
list", pSql->self); + pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); + code = TSDB_CODE_TSC_APP_ERROR; + } + // failed to get table Meta or vgroup list, retry in 10sec. if (code == TSDB_CODE_SUCCESS) { tscTansformFuncForSTableQuery(pQueryInfo); - tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name)); + tscDebug("0x%"PRIx64" stream:%p started to query table:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name)); + + pQueryInfo->command = TSDB_SQL_SELECT; pSql->fp = tscProcessStreamQueryCallback; pSql->fetchFp = tscProcessStreamQueryCallback; - tscDoQuery(pSql); + executeQuery(pSql, pQueryInfo); tscIncStreamExecutionCount(pStream); } else { setRetryInfo(pStream, code); @@ -130,8 +138,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { pStream->numOfRes = 0; // reset the numOfRes. SSqlObj *pSql = pStream->pSql; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - tscDebug("%p add into timer", pSql); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); + tscDebug("0x%"PRIx64" timer launch query", pSql->self); if (pStream->isProject) { /* @@ -186,8 +194,8 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf SSqlStream *pStream = (SSqlStream *)param; if (tres == NULL || numOfRows < 0) { int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); - tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, - retryDelay); + tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self, + pStream, numOfRows, retryDelay); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0); @@ -195,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf tNameExtractFullName(&pTableMetaInfo->name, name); taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + + tfree(pTableMetaInfo->pTableMeta); + + tscFreeSqlResult(pStream->pSql); + tscFreeSubobj(pStream->pSql); + tfree(pStream->pSql->pSubs); + pStream->pSql->subState.numOfSub = 0; + pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); tscSetRetryTimer(pStream, pStream->pSql, retryDelay); @@ -208,7 +224,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) { #if 0 SSqlObj * pSql = pStream->pSql; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) { return; @@ -237,7 +253,7 @@ static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) { } if (rowNum > 0) { - tscDebug("%p stream:%p %d rows padded", pSql, pStream, rowNum); + tscDebug("0x%"PRIx64" stream:%p %d rows padded", pSql, pStream, rowNum); } pRes->numOfRows = 0; @@ -251,19 +267,20 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); - tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); + tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pSql->self, 
pStream, numOfRows, retryDelayTime); tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; } - STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); + STableMetaInfo *pTableMetaInfo = pQueryInfo->pTableMetaInfo[0]; if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful. for(int32_t i = 0; i < numOfRows; ++i) { TAOS_ROW row = taos_fetch_row(res); if (row != NULL) { - tscDebug("%p stream:%p fetch result", pSql, pStream); + tscDebug("0x%"PRIx64" stream:%p fetch result", pSql->self, pStream); tscStreamFillTimeGap(pStream, *(TSKEY*)row[0]); pStream->stime = *(TSKEY *)row[0]; // user callback function @@ -284,7 +301,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; - tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry); + tscError("0x%"PRIx64" stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql->self, pStream, numOfRows, retry); tscSetRetryTimer(pStream, pStream->pSql, retry); return; @@ -293,10 +310,14 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf pStream->stime += 1; } - tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, tNameGetTableName(&pTableMetaInfo->name), + tscDebug("0x%"PRIx64" stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->numOfRes); tfree(pTableMetaInfo->pTableMeta); + if (pQueryInfo->pQInfo != NULL) { + qDestroyQueryInfo(pQueryInfo->pQInfo); + pQueryInfo->pQInfo = NULL; + } tscFreeSqlResult(pSql); tscFreeSubobj(pSql); @@ -318,8 +339,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) /* * current time window will be closed, since it too early to exceed the maxRetentWindow value */ - tscDebug("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", - pStream->pSql, pStream, pStream->stime, pStream->etime); + tscDebug("0x%"PRIx64" stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", + pStream->pSql->self, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here if (pStream->callback) { // Callback function from upper level @@ -329,10 +350,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) return; } - tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream, + tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream, now + timer, timer, delay, pStream->stime, etime); } else { - tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream, + tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. 
delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream, pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1); } @@ -378,8 +399,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { */ timer = pStream->interval.sliding; if (pStream->stime > pStream->etime) { - tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, - pStream->stime, pStream->etime); + tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", + pStream->pSql->self, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here if (pStream->callback) { // Callback function from upper level @@ -390,9 +411,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { } } else { int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision); - //int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision); if (stime >= pStream->etime) { - tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, + tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here if (pStream->callback) { @@ -402,10 +422,12 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { taos_close_stream(pStream); return; } - - timer = pStream->stime - taosGetTimestamp(pStream->precision); - if (timer < 0) { - timer = 0; + + if (pStream->stime > 0) { + timer = pStream->stime - taosGetTimestamp(pStream->precision); + if (timer < 0) { + timer = 0; + } } } @@ -422,7 +444,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (!pStream->isProject && pQueryInfo->interval.interval == 0) { sprintf(pSql->cmd.payload, "the interval value is 0"); @@ -430,7 +452,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { } if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) { - tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream, + tscWarn("0x%"PRIx64" stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream, (int64_t)pQueryInfo->interval.interval, minIntervalTime); pQueryInfo->interval.interval = minIntervalTime; } @@ -447,14 +469,14 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMinSlidingTime * 1000L : tsMinSlidingTime; if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) { - tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream, + tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream, pQueryInfo->interval.sliding, minSlidingTime); pQueryInfo->interval.sliding = minSlidingTime; } if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) { - tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream, + tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql->self, pStream, pQueryInfo->interval.sliding, pQueryInfo->interval.interval); pQueryInfo->interval.sliding = pQueryInfo->interval.interval; @@ -472,29 +494,32 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (pStream->isProject) { // no data in table, flush all data till now to destination meter, 10sec delay pStream->interval.interval = tsProjectExecInterval; pStream->interval.sliding = tsProjectExecInterval; - if (stime != 0) { // first projection start from the latest event timestamp + if (stime != INT64_MIN) { // first projection start from the latest event timestamp assert(stime >= pQueryInfo->window.skey); stime += 1; // exclude the last records from table } else { stime = pQueryInfo->window.skey; } } else { // timewindow based aggregation stream - if (stime == 0) { // no data in meter till now + if (stime == INT64_MIN) { // no data in meter till now if (pQueryInfo->window.skey != INT64_MIN) { stime = pQueryInfo->window.skey; + } else { + return stime; } + stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision); } else { int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision); if (newStime != stime) { - tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime); + tscWarn("0x%"PRIx64" stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql->self, pStream, stime, newStime); stime = newStime; } } @@ -525,13 +550,13 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { if (code != TSDB_CODE_SUCCESS) { pSql->res.code = code; - tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code)); + tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code)); pStream->fp(pStream->param, NULL, NULL); return; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -544,7 +569,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) { pSql->res.code = code; - tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream); + tscError("0x%"PRIx64" stream %p open failed, since the interval value is incorrect", 
pSql->self, pStream);
     pStream->fp(pStream->param, NULL, NULL);
     return;
   }
@@ -558,7 +583,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
   taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
-  tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
+  tscDebug("0x%"PRIx64" stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql->self,
            pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
 }
@@ -584,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
   if (pStream == NULL) {
-    tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
+    tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
     tscFreeSqlObj(pSql);
     return NULL;
   }
@@ -600,26 +625,26 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
   if (pSql->sqlstr == NULL) {
-    tscError("%p failed to malloc sql string buffer", pSql);
+    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
     tscFreeSqlObj(pSql);
     return NULL;
   }
   strtolower(pSql->sqlstr, sqlstr);
-  tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
+  registerSqlObj(pSql);
+
+  tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
   tsem_init(&pSql->rspSem, 0, 0);
   pSql->fp = tscCreateStream;
   pSql->fetchFp = tscCreateStream;
-  registerSqlObj(pSql);
-
   int32_t code = tsParseSql(pSql, true);
   if (code == TSDB_CODE_SUCCESS) {
     tscCreateStream(pStream, pSql, code);
   } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
-    tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code));
+    tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
     taosReleaseRef(tscObjRef, pSql->self);
     free(pStream);
     return NULL;
@@ -645,7 +670,7 @@ void taos_close_stream(TAOS_STREAM *handle) {
   taosTmrStopA(&(pStream->pTimer));
-  tscDebug("%p stream:%p is closed", pSql, pStream);
+  tscDebug("0x%"PRIx64" stream:%p is closed", pSql->self, pStream);
   // notify CQ to release the pStream object
   pStream->fp(pStream->param, NULL, NULL);
   pStream->pSql = NULL;
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index 1277a436a14c926b756ff99b0cd2e045f5dfed1f..6928058f2301b05f7eda0b4a2b77f0d0edf0f45f 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
   SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
   if (pNew == NULL) {
-    tscError("failed to retrieve table id: cannot create new sql object.");
+    tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
     return NULL;
   } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
-    tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
+    tscError("0x%"PRIx64" failed to retrieve table id, error: %s", pSql->self, tstrerror(taos_errno(pNew)));
     return NULL;
   }
@@ -284,7 +284,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
   }
   size_t numOfTables = taosArrayGetSize(tables);
-  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+  SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress)); for( size_t i = 0; i < numOfTables; i++ ) { STidTags* tt = taosArrayGet( tables, i ); @@ -304,7 +304,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) { } taosArrayDestroy(tables); - TSDB_QUERY_SET_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY); + TSDB_QUERY_SET_TYPE(tscGetQueryInfo(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY); return 1; } @@ -487,11 +487,13 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { if (pSql == NULL) { return NULL; } + if (pSub->pSql->self != 0) { taosReleaseRef(tscObjRef, pSub->pSql->self); } else { tscFreeSqlObj(pSub->pSql); } + pSub->pSql = pSql; pSql->pSubscription = pSub; } @@ -502,7 +504,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription size_t size = taosArrayGetSize(pSub->progress); @@ -555,7 +557,10 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { pSql->fp = asyncCallback; pSql->fetchFp = asyncCallback; pSql->param = pSub; - tscDoQuery(pSql); + + pSql->cmd.active = pQueryInfo; + executeQuery(pSql, pQueryInfo); + tsem_wait(&pSub->sem); if (pRes->code != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 484610818e37eaea672d305dec28e0165bf5673c..67eea432e650f4237fd53106470cbaf91d50e7c1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -24,6 +24,7 @@ #include "tschemautil.h" #include "tsclient.h" #include "qUtil.h" +#include "qPlan.h" typedef struct SInsertSupporter { SSqlObj* pSql; @@ -69,7 +70,7 @@ static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, i pthread_mutex_lock(&subState->mutex); - tscDebug("subquery:%p,%d state set to %d", pSql, idx, state); + tscDebug("subquery:0x%"PRIx64",%d state set to %d", pSql->self, idx, state); subState->states[idx] = state; @@ -81,14 +82,20 @@ static bool allSubqueryDone(SSqlObj *pParentSql) { SSubqueryState *subState = &pParentSql->subState; //lock in caller - tscDebug("%p total subqueries: %d", pParentSql, subState->numOfSub); + tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub); for (int i = 0; i < subState->numOfSub; i++) { + SSqlObj* pSub = pParentSql->pSubs[i]; if (0 == subState->states[i]) { - tscDebug("%p subquery:%p, index: %d NOT finished, abort query completion check", pParentSql, pParentSql->pSubs[i], i); + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished, abort query completion check", pParentSql->self, + pSub->self, i); done = false; break; } else { - tscDebug("%p subquery:%p, index: %d finished", pParentSql, pParentSql->pSubs[i], i); + if (pSub != NULL) { + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i); + } else { + tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i); + } } } @@ -105,14 +112,15 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { bool done = allSubqueryDone(pParentSql); if (done) { - tscDebug("%p subquery:%p,%d all subs already done", pParentSql, pSql, idx); + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self, + pSql->self, idx); 
pthread_mutex_unlock(&subState->mutex); return false; } - tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx); + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d state set to 1", pParentSql->self, pSql->self, idx); subState->states[idx] = 1; @@ -126,7 +134,7 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); win->skey = INT64_MAX; win->ekey = INT64_MIN; @@ -142,7 +150,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { int32_t equalNum = 0; int32_t stackidx = 0; SMergeTsCtx* ctx = NULL; - SMergeTsCtx* pctx = NULL; + SMergeTsCtx* pctx = NULL; SMergeTsCtx* mainCtx = NULL; STSElem cur; STSElem prev; @@ -151,25 +159,26 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { for (int32_t i = 0; i < joinNum; ++i) { STSBuf* output = tsBufCreate(true, pQueryInfo->order.order); - SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); + SQueryInfo* pSubQueryInfo = tscGetQueryInfo(&pSql->pSubs[i]->cmd, 0); pSubQueryInfo->tsBuf = output; - + SJoinSupporter* pSupporter = pSql->pSubs[i]->param; if (pSupporter->pTSBuf == NULL) { - tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); + tscDebug("0x%"PRIx64" at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql->self); return 0; } tsBufResetPos(pSupporter->pTSBuf); if (!tsBufNextPos(pSupporter->pTSBuf)) { - tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql); + tscDebug("0x%"PRIx64" input1 is empty, 0 for secondary query after ts blocks intersecting", pSql->self); return 0; } - tscDebug("%p sub:%p table idx:%d, input group number:%d", pSql, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" table idx:%d, input group number:%d", pSql->self, + pSql->pSubs[i]->self, i, pSupporter->pTSBuf->numOfGroups); ctxlist[i].p = pSupporter; ctxlist[i].res = output; @@ -201,7 +210,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { mainCtx = pctx; - while (1) { + while (1) { pctx = mainCtx; prev = tsBufGetElem(pctx->p->pTSBuf); @@ -217,9 +226,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { int32_t skipped = 0; - for (int32_t i = 1; i < tableNum; ++i) { + for (int32_t i = 1; i < tableNum; ++i) { SMergeTsCtx* tctx = &ctxlist[i]; - + // find the data in supporter2 with the same tag value STSElem e2 = tsBufFindElemStartPosByTag(tctx->p->pTSBuf, &tag); @@ -235,7 +244,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { stackidx = 0; continue; } - + tableMIdx = taosArrayGet(tsCond, ++slot); equalNum = 1; @@ -260,7 +269,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { if (ret == 0) { if (++equalNum < tableNum) { pctx = ctx; - + if (++slot >= tableNum) { slot = 0; } @@ -268,14 +277,14 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { tableMIdx = taosArrayGet(tsCond, slot); continue; } - + assert(stackidx == tableNum); if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { if (win->skey > prev.ts) { win->skey = prev.ts; } - + if (win->ekey < prev.ts) { win->ekey = prev.ts; } @@ -283,8 +292,8 @@ static int64_t 
doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { for (int32_t i = 0; i < stackidx; ++i) { SMergeTsCtx* tctx = ctxStack[i]; prev = tsBufGetElem(tctx->p->pTSBuf); - - tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts)); + + tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts)); } } else { pLimit->offset -= 1;//offset apply to projection? @@ -292,11 +301,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { for (int32_t i = 0; i < stackidx; ++i) { SMergeTsCtx* tctx = ctxStack[i]; - + if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) { mergeDone = 1; } - tctx->numOfInput++; + tctx->numOfInput++; } if (mergeDone) { @@ -304,7 +313,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { } stackidx = 0; - equalNum = 1; + equalNum = 1; ctxStack[stackidx++] = pctx; } else if (ret > 0) { @@ -312,15 +321,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { mergeDone = 1; break; } - + ctx->numOfInput++; stackidx--; - } else { + } else { stackidx--; - + for (int32_t i = 0; i < stackidx; ++i) { SMergeTsCtx* tctx = ctxStack[i]; - + if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) { mergeDone = 1; } @@ -331,9 +340,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { break; } - stackidx = 0; + stackidx = 0; equalNum = 1; - + ctxStack[stackidx++] = pctx; } @@ -345,7 +354,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { slot = 0; stackidx = 0; - + skipRemainValue(mainCtx->p->pTSBuf, &tag); } @@ -367,7 +376,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { for (int32_t i = 0; i < joinNum; ++i) { tsBufFlush(ctxlist[i].res); - + tsBufDestroy(ctxlist[i].p->pTSBuf); ctxlist[i].p->pTSBuf = NULL; } @@ -375,11 +384,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { TSKEY et = taosGetTimestampUs(); for (int32_t i = 0; i < joinNum; ++i) { - tscDebug("%p sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us", - pSql, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey, + pSql->self, pSql->pSubs[i]->self, i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey, tsBufGetNumOfGroup(ctxlist[i].res), et - st); - } + } return ctxlist[0].res->numOfTotal; } @@ -395,7 +404,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { pSupporter->pObj = pSql; pSupporter->subqueryIndex = index; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval)); pSupporter->limit = pQueryInfo->limit; @@ -450,25 +459,6 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { free(pSupporter); } -/* - * need the secondary query process - * In case of count(ts)/count(*)/spread(ts) query, that are only applied to - * primary timestamp column , the secondary query is not necessary - * - */ -static UNUSED_FUNC bool needSecondaryQuery(SQueryInfo* pQueryInfo) { - size_t numOfCols = 
taosArrayGetSize(pQueryInfo->colList); - - for (int32_t i = 0; i < numOfCols; ++i) { - SColumn* base = taosArrayGet(pQueryInfo->colList, i); - if (base->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return true; - } - } - - return false; -} - static void filterVgroupTables(SQueryInfo* pQueryInfo, SArray* pVgroupTables) { int32_t num = 0; int32_t* list = NULL; @@ -549,7 +539,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { assert(numOfSub > 0); // scan all subquery, if one sub query has only ts, ignore it - tscDebug("%p start to launch secondary subqueries, %d out of %d needs to query", pSql, numOfSub, pSql->subState.numOfSub); + tscDebug("0x%"PRIx64" start to launch secondary subqueries, %d out of %d needs to query", pSql->self, numOfSub, pSql->subState.numOfSub); bool success = true; @@ -560,7 +550,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { pSupporter = pPrevSub->param; if (taosArrayGetSize(pSupporter->exprList) == 0) { - tscDebug("%p subIndex: %d, no need to launch query, ignore it", pSql, i); + tscDebug("0x%"PRIx64" subIndex: %d, no need to launch query, ignore it", pSql->self, i); tscDestroyJoinSupporter(pSupporter); taos_free_result(pPrevSub); @@ -569,7 +559,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { continue; } - SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0); + SQueryInfo *pSubQueryInfo = tscGetQueryInfo(&pPrevSub->cmd, 0); STSBuf *pTsBuf = pSubQueryInfo->tsBuf; pSubQueryInfo->tsBuf = NULL; @@ -583,12 +573,11 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { success = false; break; } - tscClearSubqueryInfo(&pNew->cmd); pSql->pSubs[i] = pNew; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pNew->cmd, 0); pQueryInfo->tsBuf = pTsBuf; // transfer the ownership of timestamp comp-z data to the new created object // set the second stage sub query for join process @@ -597,10 +586,11 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond); - pQueryInfo->colList = pSupporter->colList; - pQueryInfo->exprList = pSupporter->exprList; - pQueryInfo->fieldsInfo = pSupporter->fieldsInfo; + pQueryInfo->colList = pSupporter->colList; + pQueryInfo->exprList = pSupporter->exprList; + pQueryInfo->fieldsInfo = pSupporter->fieldsInfo; pQueryInfo->groupbyExpr = pSupporter->groupInfo; + pQueryInfo->pUpstream = taosArrayInit(4, sizeof(POINTER_BYTES)); assert(pNew->subState.numOfSub == 0 && pNew->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1); @@ -620,22 +610,22 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { * during the timestamp intersection. */ pSupporter->limit = pQueryInfo->limit; - pQueryInfo->limit = pSupporter->limit; +// pQueryInfo->limit = pSupporter->limit; SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0); - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); - int16_t funcId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, 0); + int16_t funcId = pExpr->base.functionId; // add the invisible timestamp column - if ((pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) || + if ((pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) || (funcId != TSDB_FUNC_TS && funcId != TSDB_FUNC_TS_DUMMY && funcId != TSDB_FUNC_PRJ)) { int16_t functionId = tscIsProjectionQuery(pQueryInfo)? 
TSDB_FUNC_PRJ : TSDB_FUNC_TS; tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL); - tscPrintSelectClause(pNew, 0); + tscPrintSelNodeList(pNew, 0); tscFieldInfoUpdateOffset(pQueryInfo); pExpr = tscSqlExprGet(pQueryInfo, 0); @@ -647,8 +637,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value - pExpr->param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; - pExpr->numOfParams = 1; + pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; + pExpr->base.numOfParams = 1; } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -665,15 +655,15 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { subquerySetState(pPrevSub, &pSql->subState, i, 0); size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); - tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", - pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList), + tscDebug("0x%"PRIx64" subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", + pSql->self, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); } //prepare the subqueries object failed, abort if (!success) { pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql, + tscError("0x%"PRIx64" failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql->self, pSql->subState.numOfSub, pSql->res.code); freeJoinSubqueryObj(pSql); @@ -685,7 +675,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { continue; } - tscDoQuery(pSql->pSubs[i]); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->pSubs[i]->cmd, 0); + executeQuery(pSql->pSubs[i], pQueryInfo); } return TSDB_CODE_SUCCESS; @@ -717,7 +708,7 @@ void freeJoinSubqueryObj(SSqlObj* pSql) { static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) { if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) { - tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code)); + tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code)); freeJoinSubqueryObj(pSqlObj); return 0; } @@ -792,7 +783,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr if (taosArrayGetSize(result) > 0) { SVgroupTableInfo* prevGroup = taosArrayGet(result, taosArrayGetSize(result) - 1); - tscDebug("%p vgId:%d, tables:%"PRIzu, pSql, prevGroup->vgInfo.vgId, taosArrayGetSize(prevGroup->itemList)); + tscDebug("0x%"PRIx64" vgId:%d, tables:%"PRIzu, pSql->self, prevGroup->vgInfo.vgId, taosArrayGetSize(prevGroup->itemList)); } taosArrayPush(result, &info); @@ -801,7 +792,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN}; taosArrayPush(vgTables, &item); - tscTrace("%p tid:%d, uid:%"PRIu64",vgId:%d added", pSql, tt->tid, tt->uid, tt->vgId); + tscTrace("0x%"PRIx64" 
tid:%d, uid:%"PRIu64",vgId:%d added", pSql->self, tt->tid, tt->uid, tt->vgId); prev = tt; } @@ -814,7 +805,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr pTableMetaInfo->pVgroupTables = result; SVgroupTableInfo* g = taosArrayGet(result, taosArrayGetSize(result) - 1); - tscDebug("%p vgId:%d, tables:%"PRIzu, pSql, g->vgInfo.vgId, taosArrayGetSize(g->itemList)); + tscDebug("0x%"PRIx64" vgId:%d, tables:%"PRIzu, pSql->self, g->vgInfo.vgId, taosArrayGetSize(g->itemList)); } } @@ -823,7 +814,7 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* tscClearSubqueryInfo(pCmd); tscFreeSqlResult(pSql); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0); assert(pQueryInfo->numOfTables == 1); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -842,10 +833,12 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* // set the tags value for ts_comp function if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0); + SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, 0); int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); - pExpr->param->i64 = tagColId; - pExpr->numOfParams = 1; + pExpr->base.param[0].i64 = tagColId; + pExpr->base.param[0].nLen = sizeof(int64_t); + pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; + pExpr->base.numOfParams = 1; } // add the filter tag column @@ -855,7 +848,7 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* for (int32_t i = 0; i < s; ++i) { SColumn *pCol = taosArrayGetP(pSupporter->colList, i); - if (pCol->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. + if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. 
SColumn *p = tscColumnClone(pCol); taosArrayPush(pQueryInfo->colList, &p); } @@ -865,12 +858,12 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); tscDebug( - "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, " + "0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, " "numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s", - pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type, + pParent->self, pSql->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type, tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSqlObj* pPSqlObj) { @@ -880,7 +873,7 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq assert(prev->vgId >= 1 && p->vgId >= 1); if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) { - tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj); + tscError("0x%"PRIx64" join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj->self); pPSqlObj->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY; return false; } @@ -903,19 +896,19 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); - tscDebug("%p all subquery retrieve complete, do tags match", pParentSql); + tscDebug("0x%"PRIx64" all subquery retrieve complete, do tags match", pParentSql->self); for (int32_t i = 0; i < joinNum; i++) { SJoinSupporter* p = pParentSql->pSubs[i]->param; ctxlist[i].p = p; ctxlist[i].res = taosArrayInit(p->num, size); - + tscDebug("Join %d - num:%d", i, p->num); - + // sort according to the tag valu qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar); - + if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) { for (int32_t j = 0; j <= i; j++) { taosArrayDestroy(ctxlist[j].res); @@ -968,9 +961,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar mergeDone = 0; continue; } - + tableMIdx = taosArrayGet(tagCond, slot); - + pctx = &ctxlist[*tableMIdx]; prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); @@ -980,10 +973,10 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar tableMIdx = taosArrayGet(tagCond, ++slot); equalNum = 1; - + while (1) { ctx = &ctxlist[*tableMIdx]; - + cur = (STidTags*) varDataVal(ctx->p->pIdTagList + ctx->idx * ctx->p->tagSize); assert(cur->tid != 0 && prev->tid != 0); @@ -995,7 +988,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar if (++equalNum < tableNum) { prev = cur; pctx = ctx; - + if (++slot >= tableNum) { slot = 0; } @@ -1004,11 +997,11 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar continue; } - tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, prev->vgId, + tscDebug("0x%"PRIx64" tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql->self, prev->vgId, *(int*) 
prev->tag, prev->tid, prev->uid, cur->tid, cur->uid); assert(stackidx == tableNum); - + for (int32_t i = 0; i < stackidx; ++i) { SMergeCtx* tctx = ctxStack[i]; prev = (STidTags*) varDataVal(tctx->p->pIdTagList + tctx->idx * tctx->p->tagSize); @@ -1018,7 +1011,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar for (int32_t i = 0; i < stackidx; ++i) { SMergeCtx* tctx = ctxStack[i]; - + if (++tctx->idx >= tctx->p->num) { mergeDone = 1; break; @@ -1031,19 +1024,19 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar stackidx = 0; equalNum = 1; - + prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); ctxStack[stackidx++] = pctx; } else if (ret > 0) { stackidx--; - + if (++ctx->idx >= ctx->p->num) { break; } } else { stackidx--; - + for (int32_t i = 0; i < stackidx; ++i) { SMergeCtx* tctx = ctxStack[i]; if (++tctx->idx >= tctx->p->num) { @@ -1056,9 +1049,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar break; } - stackidx = 0; + stackidx = 0; equalNum = 1; - + prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); ctxStack[stackidx++] = pctx; } @@ -1074,14 +1067,14 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar // reorganize the tid-tag value according to both the vgroup id and tag values // sort according to the tag value size_t num = taosArrayGetSize(ctxlist[i].res); - + qsort((ctxlist[i].res)->pData, num, size, tidTagsCompar); taosArrayPush(resList, &ctxlist[i].res); - tscDebug("%p tags match complete, result num: %"PRIzu, pParentSql, num); + tscDebug("0x%"PRIx64" tags match complete, result num: %"PRIzu, pParentSql->self, num); } - + return TSDB_CODE_SUCCESS; } @@ -1110,13 +1103,13 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); // todo, the type may not include TSDB_QUERY_TYPE_TAG_FILTER_QUERY assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)); if (pParentSql->res.code != TSDB_CODE_SUCCESS) { - tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code); + tscError("0x%"PRIx64" abort query due to other subquery failure. 
code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code); if (quitAllSubquery(pSql, pParentSql, pSupporter)) { return; } @@ -1131,7 +1124,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // todo retry if other subqueries are not failed assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); - tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex); + tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); pParentSql->res.code = numOfRows; if (quitAllSubquery(pSql, pParentSql, pSupporter)) { @@ -1150,7 +1143,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // todo handle memory error char* tmp = realloc(pSupporter->pIdTagList, length); if (tmp == NULL) { - tscError("%p failed to malloc memory", pSql); + tscError("0x%"PRIx64" failed to malloc memory", pSql->self); pParentSql->res.code = TAOS_SYSTEM_ERROR(errno); if (quitAllSubquery(pSql, pParentSql, pSupporter)) { @@ -1183,22 +1176,22 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pTableMetaInfo->vgroupIndex += 1; assert(pTableMetaInfo->vgroupIndex < totalVgroups); - tscDebug("%p tid_tag from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d", - pSql, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num); + tscDebug("0x%"PRIx64" tid_tag from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d", + pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num); pCmd->command = TSDB_SQL_SELECT; tscResetForNextRetrieve(&pSql->res); // set the callback function pSql->fp = tscJoinQueryCallback; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return; } // no data exists in next vnode, mark the query completed // only when there is no subquery exits any more, proceeds to get the intersect of the tuple sets. if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) { - tscDebug("%p tagRetrieve:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub); + tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub); return; } @@ -1217,7 +1210,8 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (emptyTagList(resList, pParentSql->subState.numOfSub)) { // no results,return. 
assert(pParentSql->fp != tscJoinQueryCallback); - tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); + tscDebug("0x%"PRIx64" tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", + pParentSql->self); freeJoinSubqueryObj(pParentSql); // set no result command @@ -1230,16 +1224,16 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // proceed to for ts_comp query SSqlCmd* pSubCmd = &pParentSql->pSubs[m]->cmd; SArray** s = taosArrayGet(resList, m); - - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + + SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pSubCmd, 0); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0); tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo, *s); - + SSqlObj* psub = pParentSql->pSubs[m]; ((SJoinSupporter*)psub->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo->pVgroupTables); - + memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub); - tscDebug("%p reset all sub states to 0", pParentSql); + tscDebug("0x%"PRIx64" reset all sub states to 0", pParentSql->self); issueTsCompQuery(psub, psub->param, pParentSql); } @@ -1265,11 +1259,11 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)); if (pParentSql->res.code != TSDB_CODE_SUCCESS) { - tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code); + tscError("0x%"PRIx64" abort query due to other subquery failure. 
code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code); if (quitAllSubquery(pSql, pParentSql, pSupporter)){ return; } @@ -1283,7 +1277,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // todo retry if other subqueries are not failed yet assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); - tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex); + tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); pParentSql->res.code = numOfRows; if (quitAllSubquery(pSql, pParentSql, pSupporter)){ @@ -1299,7 +1293,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pSupporter->f = fopen(pSupporter->path, "wb"); if (pSupporter->f == NULL) { - tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno)); + tscError("0x%"PRIx64" failed to create tmp file:%s, reason:%s", pSql->self, pSupporter->path, strerror(errno)); pParentSql->res.code = TAOS_SYSTEM_ERROR(errno); @@ -1319,7 +1313,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true); if (pBuf == NULL) { // in error process, close the fd - tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows); + tscError("0x%"PRIx64" invalid ts comp file from vnode, abort subquery, file size:%d", pSql->self, numOfRows); pParentSql->res.code = TAOS_SYSTEM_ERROR(errno); if (quitAllSubquery(pSql, pParentSql, pSupporter)){ @@ -1332,7 +1326,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow } if (pSupporter->pTSBuf == NULL) { - tscDebug("%p create tmp file for ts block:%s, size:%d bytes", pSql, pBuf->path, numOfRows); + tscDebug("0x%"PRIx64" create tmp file for ts block:%s, size:%d bytes", pSql->self, pBuf->path, numOfRows); pSupporter->pTSBuf = pBuf; } else { assert(pQueryInfo->numOfTables == 1); // for subquery, only one @@ -1358,8 +1352,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pTableMetaInfo->vgroupIndex += 1; assert(pTableMetaInfo->vgroupIndex < totalVgroups); - tscDebug("%p results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64, - pSql, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, + tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. 
current numOfRes:%" PRId64, + pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal); pCmd->command = TSDB_SQL_SELECT; @@ -1374,7 +1368,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // set the callback function pSql->fp = tscJoinQueryCallback; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return; } @@ -1382,12 +1376,12 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow return; } - tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql); + tscDebug("0x%"PRIx64" all subquery retrieve ts complete, do ts block intersect", pParentSql->self); STimeWindow win = TSWINDOW_INITIALIZER; int64_t num = doTSBlockIntersect(pParentSql, &win); if (num <= 0) { // no result during ts intersect - tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql); + tscDebug("0x%"PRIx64" no results generated in ts intersection, free all sub SqlObj and quit", pParentSql->self); freeJoinSubqueryObj(pParentSql); // set no result command @@ -1397,7 +1391,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow } // launch the query the retrieve actual results from vnode along with the filtered timestamp - SQueryInfo* pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); + SQueryInfo* pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd, pParentSql->cmd.clauseIndex); updateQueryTimeRange(pPQueryInfo, &win); //update the vgroup that involved in real data query @@ -1413,10 +1407,10 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); if (pParentSql->res.code != TSDB_CODE_SUCCESS) { - tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code); + tscError("0x%"PRIx64" abort query due to other subquery failure. 
code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code); if (quitAllSubquery(pSql, pParentSql, pSupporter)) { return; } @@ -1431,7 +1425,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR assert(numOfRows == taos_errno(pSql)); pParentSql->res.code = numOfRows; - tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows)); + tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows)); tscAsyncResultOnError(pParentSql); return; @@ -1455,23 +1449,23 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR } if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { - tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSql, pTableMetaInfo->vgroupIndex); + tscDebug("0x%"PRIx64" no result in current vnode anymore, try next vnode, vgIndex:%d", pSql->self, pTableMetaInfo->vgroupIndex); pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return; } else { - tscDebug("%p no result in current subquery anymore", pSql); + tscDebug("0x%"PRIx64" no result in current subquery anymore", pSql->self); } } if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) { - tscDebug("%p sub:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pState->numOfSub); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub); return; } - tscDebug("%p all %d secondary subqueries retrieval completed, code:%d", tres, pState->numOfSub, pParentSql->res.code); + tscDebug("0x%"PRIx64" all %d secondary subqueries retrieval completed, code:%d", pSql->self, pState->numOfSub, pParentSql->res.code); if (pParentSql->res.code != TSDB_CODE_SUCCESS) { freeJoinSubqueryObj(pParentSql); @@ -1482,23 +1476,23 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR bool stableQuery = tscIsTwoStageSTableQuery(pQueryInfo, 0); for (int32_t i = 0; i < pState->numOfSub; ++i) { if (pParentSql->pSubs[i] == NULL) { - tscDebug("%p %p sub:%d not retrieve data", pParentSql, NULL, i); + tscDebug("0x%"PRIx64" %p sub:%d not retrieve data", pParentSql->self, NULL, i); continue; } SSqlRes* pRes1 = &pParentSql->pSubs[i]->res; if (pRes1->row > 0 && pRes1->numOfRows > 0) { - tscDebug("%p sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql, pParentSql->pSubs[i], i, - pRes1->numOfRows, pRes1->numOfTotal); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, + pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); assert(pRes1->row < pRes1->numOfRows); } else { if (!stableQuery) { pRes1->numOfClauseTotal += pRes1->numOfRows; } - tscDebug("%p sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql, pParentSql->pSubs[i], i, - pRes1->numOfRows, pRes1->numOfTotal); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self, + pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); } } @@ -1522,8 +1516,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { SSqlRes *pRes = &pSub->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); - + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSub->cmd, 0); if (!tscHasReachLimitation(pQueryInfo, pRes)) { if (pRes->row >= pRes->numOfRows) { // no data left in 
current result buffer @@ -1575,7 +1568,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { continue; } - SQueryInfo* p = tscGetQueryInfoDetail(&pSub->cmd, 0); + SQueryInfo* p = tscGetQueryInfo(&pSub->cmd, 0); orderedPrjQuery = tscNonOrderedProjectionQueryOnSTable(p, 0); if (orderedPrjQuery) { break; @@ -1599,7 +1592,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { continue; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSub->cmd, 0); if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) { @@ -1615,15 +1608,15 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { } if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { - tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSub, + tscDebug("0x%"PRIx64" no result in current vnode anymore, try next vnode, vgIndex:%d", pSub->self, pTableMetaInfo->vgroupIndex); pSub->cmd.command = TSDB_SQL_SELECT; pSub->fp = tscJoinQueryCallback; - tscProcessSql(pSub); + tscBuildAndSendRequest(pSub, NULL); tryNextVnode = true; } else { - tscDebug("%p no result in current subquery anymore", pSub); + tscDebug("0x%"PRIx64" no result in current subquery anymore", pSub->self); } } } @@ -1646,7 +1639,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled // retrieve data from current vnode. - tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch); + tscDebug("0x%"PRIx64" retrieve data from %d subqueries", pSql->self, numOfFetch); SJoinSupporter* pSupporter = NULL; for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { @@ -1674,13 +1667,13 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { pSupporter = (SJoinSupporter*)pSql1->param; // wait for all subqueries completed - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd1, 0); assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (pRes1->row >= pRes1->numOfRows) { - tscDebug("%p subquery:%p retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql, pSql1, + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1->self, pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex); tscResetForNextRetrieve(pRes1); @@ -1690,7 +1683,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - tscProcessSql(pSql1); + tscBuildAndSendRequest(pSql1, NULL); } } } @@ -1700,13 +1693,12 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - // the column transfer support struct has been built if (pRes->pColumnIndex != NULL) { return; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs); @@ -1716,12 +1708,12 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t tableIndexOfSub = -1; for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, j); - if (pTableMetaInfo->pTableMeta->id.uid == pExpr->uid) { + if (pTableMetaInfo->pTableMeta->id.uid == pExpr->base.uid) { tableIndexOfSub = j; break; } @@ -1730,12 +1722,12 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables); SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd; - SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); + SQueryInfo* pSubQueryInfo = tscGetQueryInfo(pSubCmd, 0); size_t numOfSubExpr = taosArrayGetSize(pSubQueryInfo->exprList); for (int32_t k = 0; k < numOfSubExpr; ++k) { - SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); - if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) { + SExprInfo* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); + if (pExpr->base.functionId == pSubExpr->base.functionId && pExpr->base.colInfo.colId == pSubExpr->base.colInfo.colId) { pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k}; break; } @@ -1754,14 +1746,14 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { SSqlObj* pParentSql = pSupporter->pObj; // There is only one subquery and table for each subquery. - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); assert(pQueryInfo->numOfTables == 1 && pSql->cmd.numOfClause == 1); // retrieve actual query results from vnode during the second stage join subquery if (pParentSql->res.code != TSDB_CODE_SUCCESS) { - tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code); + tscError("0x%"PRIx64" abort query due to other subquery failure. 
code:%d, global code:%d", pSql->self, code, pParentSql->res.code); if (quitAllSubquery(pSql, pParentSql, pSupporter)) { return; } @@ -1775,7 +1767,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { assert(taos_errno(pSql) == code); - tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code)); + tscError("0x%"PRIx64" abort query, code:%s, global code:%s", pSql->self, tstrerror(code), tstrerror(pParentSql->res.code)); pParentSql->res.code = code; if (quitAllSubquery(pSql, pParentSql, pSupporter)) { @@ -1791,7 +1783,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) { pSql->fp = tidTagRetrieveCallback; pSql->cmd.command = TSDB_SQL_FETCH; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return; } @@ -1799,7 +1791,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { pSql->fp = tsCompRetrieveCallback; pSql->cmd.command = TSDB_SQL_FETCH; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); return; } @@ -1820,7 +1812,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data pSql->cmd.command = TSDB_SQL_FETCH; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } else { // first retrieve from vnode during the secondary stage sub-query // set the command flag must be after the semaphore been correctly set. if (pParentSql->res.code == TSDB_CODE_SUCCESS) { @@ -1838,7 +1830,7 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) { SSqlCmd * pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); pSql->res.qId = 0x1; assert(pSql->res.numOfRows == 0); @@ -1861,15 +1853,14 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter addGroupInfoForSubquery(pSql, pNew, 0, tableIndex); // refactor as one method - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd, 0); assert(pNewQueryInfo != NULL); // update the table index - size_t num = taosArrayGetSize(pNewQueryInfo->colList); - for (int32_t i = 0; i < num; ++i) { - SColumn* pCol = taosArrayGetP(pNewQueryInfo->colList, i); - pCol->colIndex.tableIndex = 0; - } +// size_t num = taosArrayGetSize(pNewQueryInfo->colList); +// for (int32_t i = 0; i < num; ++i) { +// SColumn* pCol = taosArrayGetP(pNewQueryInfo->colList, i); +// } pSupporter->colList = pNewQueryInfo->colList; pNewQueryInfo->colList = NULL; @@ -1894,6 +1885,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter pNewQueryInfo->limit.limit = -1; pNewQueryInfo->limit.offset = 0; + taosArrayDestroy(pNewQueryInfo->pUpstream); pNewQueryInfo->order.orderColId = INT32_MIN; @@ -1940,12 +1932,12 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL); // set the tags value for ts_comp function - SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); + SExprInfo *pExpr = tscSqlExprGet(pNewQueryInfo, 0); if 
(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); - pExpr->param->i64 = tagColId; - pExpr->numOfParams = 1; + pExpr->base.param->i64 = tagColId; + pExpr->base.numOfParams = 1; } // add the filter tag column @@ -1955,7 +1947,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter for (int32_t i = 0; i < s; ++i) { SColumn *pCol = taosArrayGetP(pSupporter->colList, i); - if (pCol->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. + if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. SColumn *p = tscColumnClone(pCol); taosArrayPush(pNewQueryInfo->colList, &p); } @@ -1972,7 +1964,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter } } else { assert(0); - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd, 0); pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; } @@ -1983,7 +1975,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0); int32_t code = TSDB_CODE_SUCCESS; @@ -2000,17 +1992,13 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { } memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub); - tscDebug("%p reset all sub states to 0", pSql); - - bool hasEmptySub = false; + tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self); - tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables); + tscDebug("0x%"PRIx64" start subquery, total:%d", pSql->self, pQueryInfo->numOfTables); for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { - SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i); - if (pSupporter == NULL) { // failed to create support struct, abort current query - tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i); + tscError("0x%"PRIx64" tableIndex:%d, failed to allocate join support object, abort further query", pSql->self, i); code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } @@ -2024,14 +2012,13 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { SSqlObj* pSub = pSql->pSubs[i]; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSub->cmd, 0, 0); if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->vgroupList->numOfVgroups == 0)) { - hasEmptySub = true; + pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; break; } } - if (hasEmptySub) { // at least one subquery is empty, do nothing and return + if (pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { // at least one subquery is empty, do nothing and return freeJoinSubqueryObj(pSql); - pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; (*pSql->fp)(pSql->param, pSql, 0); } else { int fail = 0; @@ -2042,7 +2029,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { continue; } - if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) { + if ((code = tscBuildAndSendRequest(pSub, NULL)) != TSDB_CODE_SUCCESS) { pRes->code = code; (*pSub->fp)(pSub->param, pSub, 0); fail = 1; @@ -2111,12 +2098,12 @@ typedef struct SFirstRoundQuerySup { void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, SQueryInfo* pQueryInfo) { TSKEY key = INT64_MIN; for(int32_t i 
= 0; i < numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag) || pExpr->base.functionId == TSDB_FUNC_PRJ) { continue; } - if (pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { key = *(TSKEY*) row[i]; continue; } @@ -2128,7 +2115,7 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S SET_DOUBLE_NULL(&v); } - int32_t id = pExpr->colInfo.colId; + int32_t id = pExpr->base.colInfo.colId; int32_t numOfQueriedCols = (int32_t) taosArrayGetSize(pInterResult->pResult); SArray* p = NULL; @@ -2172,7 +2159,7 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { SFirstRoundQuerySup* pSup = param; SSqlObj* pParent = pSup->pParent; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); int32_t code = taos_errno(pSql); if (code != TSDB_CODE_SUCCESS) { @@ -2204,16 +2191,16 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { int32_t offset = 0; for (int32_t i = 0; i < numOfCols && offset < pSup->tagLen; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); // tag or group by column - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { + if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag) || pExpr->base.functionId == TSDB_FUNC_PRJ) { if (row[i] == NULL) { - setNull(p + offset, pExpr->resType, pExpr->resBytes); + setNull(p + offset, pExpr->base.resType, pExpr->base.resBytes); } else { memcpy(p + offset, row[i], length[i]); } - offset += pExpr->resBytes; + offset += pExpr->base.resBytes; } } @@ -2248,14 +2235,14 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { } } - if (!pRes->completed) { + if (!pRes->completed && numOfRows > 0) { taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param); return; } // set the parameters for the second round query process SSqlCmd *pPCmd = &pParent->cmd; - SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0); + SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pPCmd, 0); int32_t resRows = pSup->numOfRows; if (pSup->numOfRows > 0) { @@ -2282,7 +2269,7 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { } pQueryInfo1->round = 1; - tscDoQuery(pParent); + executeQuery(pParent, pQueryInfo1); } void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) { @@ -2305,7 +2292,7 @@ void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) { } int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo* pTableMetaInfo1 = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); SFirstRoundQuerySup *pSup = calloc(1, sizeof(SFirstRoundQuerySup)); @@ -2321,7 +2308,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { tscClearSubqueryInfo(pCmd); tscFreeSqlResult(pSql); - SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pCmd, 0); assert(pQueryInfo->numOfTables == 1); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); @@ -2350,68 +2337,66 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { int32_t index = 
0; for(int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { - taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { + taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId); SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); - SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL); - p->resColId = pExpr->resColId; // update the result column id - } else if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) { - taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL); + p->base.resColId = pExpr->base.resColId; // update the result column id + } else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) { + taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId); - SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex}; + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->base.colInfo.colIndex}; SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)}; - tstrncpy(schema.name, pExpr->aliasName, tListLen(schema.name)); + tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name)); - SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL); - p->resColId = pExpr->resColId; // update the result column id - } else if (pExpr->functionId == TSDB_FUNC_TAG) { - pSup->tagLen += pExpr->resBytes; - SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex}; + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL); + p->base.resColId = pExpr->base.resColId; // update the result column id + } else if (pExpr->base.functionId == TSDB_FUNC_TAG) { + pSup->tagLen += pExpr->base.resBytes; + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->base.colInfo.colIndex}; SSchema* schema = NULL; - if (pExpr->colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) { - schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + if (pExpr->base.colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) { + schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); } else { schema = tGetTbnameColumnSchema(); } - SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG); - p->resColId = pExpr->resColId; - } else if (pExpr->functionId == TSDB_FUNC_PRJ) { + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG); + p->base.resColId = pExpr->base.resColId; + } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) { int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); for(int32_t k = 0; k < num; ++k) { SColIndex* pIndex = taosArrayGet(pNewQueryInfo->groupbyExpr.columnInfo, k); - if (pExpr->colInfo.colId == pIndex->colId) { - pSup->tagLen += pExpr->resBytes; - taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + if 
(pExpr->base.colInfo.colId == pIndex->colId) { + pSup->tagLen += pExpr->base.resBytes; + taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId); SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pIndex->colIndex}; - SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); //doLimitOutputNormalColOfGroupby - SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL); - p->numOfParams = 1; - p->param[0].i64 = 1; - p->param[0].nType = TSDB_DATA_TYPE_INT; - p->resColId = pExpr->resColId; // update the result column id + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL); + p->base.numOfParams = 1; + p->base.param[0].i64 = 1; + p->base.param[0].nType = TSDB_DATA_TYPE_INT; + p->base.resColId = pExpr->base.resColId; // update the result column id } } } } - SColumnIndex columnIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscInsertPrimaryTsSourceColumn(pNewQueryInfo, &columnIndex); - + tscInsertPrimaryTsSourceColumn(pNewQueryInfo, pTableMetaInfo->pTableMeta->id.uid); tscTansformFuncForSTableQuery(pNewQueryInfo); tscDebug( - "%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, " + "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, " "numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s", - pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type, + pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); tscHandleMasterSTableQuery(pNew); @@ -2444,7 +2429,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { const uint32_t nBufferSize = (1u << 16u); // 64KB - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSubqueryState *pState = &pSql->subState; @@ -2465,7 +2450,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return ret; } - tscDebug("%p retrieved query data from %d vnode(s)", pSql, pState->numOfSub); + tscDebug("0x%"PRIx64" retrieved query data from %d vnode(s)", pSql->self, pState->numOfSub); pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); if (pSql->pSubs == NULL) { tfree(pSql->pSubs); @@ -2489,7 +2474,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { } memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub); - tscDebug("%p reset all sub states to 0", pSql); + tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self); pRes->code = TSDB_CODE_SUCCESS; @@ -2497,7 +2482,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { for (; i < pState->numOfSub; ++i) { SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport)); if (trs == NULL) { - tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); + tscError("0x%"PRIx64" failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); break; } @@ -2506,7 +2491,7 @@ int32_t 
tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); if (trs->localBuffer == NULL) { - tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); + tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); tfree(trs); break; } @@ -2518,7 +2503,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL); if (pNew == NULL) { - tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); + tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); tfree(trs->localBuffer); tfree(trs); break; @@ -2526,16 +2511,17 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { // todo handle multi-vnode situation if (pQueryInfo->tsBuf) { - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd, 0); pNewQueryInfo->tsBuf = tsBufClone(pQueryInfo->tsBuf); assert(pNewQueryInfo->tsBuf != NULL); } - tscDebug("%p sub:%p create subquery success. orderOfSub:%d", pSql, pNew, trs->subqueryIndex); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self, + trs->subqueryIndex); } if (i < pState->numOfSub) { - tscError("%p failed to prepare subquery structure and launch subqueries", pSql); + tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub); @@ -2553,8 +2539,8 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SSqlObj* pSub = pSql->pSubs[j]; SRetrieveSupport* pSupport = pSub->param; - tscDebug("%p sub:%p launch subquery, orderOfSub:%d.", pSql, pSub, pSupport->subqueryIndex); - tscProcessSql(pSub); + tscDebug("0x%"PRIx64" sub:%p launch subquery, orderOfSub:%d.", pSql->self, pSub, pSupport->subqueryIndex); + tscBuildAndSendRequest(pSub, NULL); } return TSDB_CODE_SUCCESS; @@ -2565,11 +2551,11 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); if (p == NULL) { - tscDebug("%p retrieve supp already released", pSql); + tscDebug("0x%"PRIx64" retrieve supp already released", pSql->self); return; } - tscDebug("%p start to free subquery supp obj:%p", pSql, trsupport); + tscDebug("0x%"PRIx64" start to free subquery supp obj:%p", pSql->self, trsupport); tfree(trsupport->localBuffer); tfree(trsupport); } @@ -2579,7 +2565,7 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) { // set no disk space error info - tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code)); + tscError("sub:0x%"PRIx64" failed to flush data to disk, reason:%s", ((SSqlObj *)tres)->self, tstrerror(code)); SSqlObj* pParentSql = trsupport->pParentSql; pParentSql->res.code = code; @@ -2604,7 +2590,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 const uint32_t nBufferSize = (1u << 16u); // 64KB trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); if (trsupport->localBuffer == NULL) { - tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, 
strerror(errno)); + tscError("0x%"PRIx64" failed to malloc buffer for local buffer, reason:%s", pSql->self, strerror(errno)); tfree(trsupport); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2619,13 +2605,13 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 // clear local saved number of results trsupport->localBuffer->num = 0; - tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql, + tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql->self, pSql->self, tstrerror(code), subqueryIndex, trsupport->numOfRetry); SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql); if (pNew == NULL) { - tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d", - oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex); + tscError("0x%"PRIx64" sub:0x%"PRIx64" failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d", + oriTrs->pParentSql->self, pSql->self, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex); pParentSql->res.code = terrno; oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -2634,7 +2620,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 return pParentSql->res.code; } - int32_t ret = tscProcessSql(pNew); + int32_t ret = tscBuildAndSendRequest(pNew, NULL); *sent = 1; @@ -2673,13 +2659,13 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO */ pSql->res.numOfRows = 0; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts - tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%s", pParentSql, pSql, + tscDebug("0x%"PRIx64" query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%s", pParentSql->self, pSql, subqueryIndex, tstrerror(pParentSql->res.code)); } if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query. 
- tscDebug("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pParentSql, pSql, numOfRows, subqueryIndex); - tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql, + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d,orderOfSub:%d", pParentSql->self, pSql->self, numOfRows, subqueryIndex); + tscError("0x%"PRIx64" sub:0x%"PRIx64" abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql->self, pSql->self, subqueryIndex, tstrerror(pParentSql->res.code)); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) { @@ -2691,20 +2677,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO } } else { // reach the maximum retry count, abort atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows); - tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql, pSql, + tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self, tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code)); } } if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) { - tscDebug("%p sub:%p,%d freed, not finished, total:%d", pParentSql, pSql, trsupport->subqueryIndex, pState->numOfSub); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d freed, not finished, total:%d", pParentSql->self, + pSql->self, trsupport->subqueryIndex, pState->numOfSub); tscFreeRetrieveSup(pSql); return; } // all subqueries are failed - tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub, + tscError("0x%"PRIx64" retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql->self, pState->numOfSub, tstrerror(pParentSql->res.code)); // release allocated resource @@ -2714,7 +2701,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscFreeRetrieveSup(pSql); // in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd, 0); if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); @@ -2731,15 +2718,15 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; SSubqueryState* pState = &pParentSql->subState; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0]; // data in from current vnode is stored in cache and disk uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num); SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList; - tscDebug("%p sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql, - vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self, + pSql->self, vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx); 
tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity); @@ -2752,7 +2739,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p #endif if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) { - tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql, + tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self, tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace); tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE); return; @@ -2767,7 +2754,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p } if (!subAndCheckDone(pSql, pParentSql, idx)) { - tscDebug("%p sub:%p orderOfSub:%d freed, not finished", pParentSql, pSql, trsupport->subqueryIndex); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d freed, not finished", pParentSql->self, pSql->self, + trsupport->subqueryIndex); tscFreeRetrieveSup(pSql); return; @@ -2776,14 +2764,14 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p // all sub-queries are returned, start to local merge process pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage; - tscDebug("%p retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree", pParentSql, - pState->numOfSub, pState->numOfRetrievedRows); + tscDebug("0x%"PRIx64" retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree", + pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); - SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0); + SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd, 0); tscClearInterpInfo(pPQueryInfo); tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql); - tscDebug("%p build loser tree completed", pParentSql); + tscDebug("0x%"PRIx64" build loser tree completed", pParentSql->self); pParentSql->res.precision = pSql->res.precision; pParentSql->res.numOfRows = 0; @@ -2792,7 +2780,10 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p tscFreeRetrieveSup(pSql); // set the command flag must be after the semaphore been correctly set. 
- pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE; + if (pParentSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) { + pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE; + } + if (pParentSql->res.code == TSDB_CODE_SUCCESS) { (*pParentSql->fp)(pParentSql->param, pParentSql, 0); } else { @@ -2807,7 +2798,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR // this query has been freed already SRetrieveSupport *trsupport = (SRetrieveSupport *)param; if (pSql->param == NULL || param == NULL) { - tscDebug("%p already freed in dnodecallback", pSql); + tscDebug("0x%"PRIx64" already freed in dnodecallback", pSql->self); return; } @@ -2822,8 +2813,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; - tscDebug("%p query cancelled/failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s", - pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(numOfRows), tstrerror(pParentSql->res.code)); + tscDebug("0x%"PRIx64" query cancelled/failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s", + pParentSql->self, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(numOfRows), tstrerror(pParentSql->res.code)); tscHandleSubqueryError(param, tres, numOfRows); return; @@ -2837,7 +2828,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR } if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { - tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry); + tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(numOfRows), trsupport->numOfRetry); int32_t sent = 0; @@ -2846,7 +2837,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR return; } } else { - tscDebug("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows)); + tscDebug("0x%"PRIx64" sub:%p reach the max retry times, set global code:%s", pParentSql->self, pSql, tstrerror(numOfRows)); atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows); // set global code and abort } @@ -2855,18 +2846,18 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR } SSqlRes * pRes = &pSql->res; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); if (numOfRows > 0) { assert(pRes->numOfRows == numOfRows); int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows); - tscDebug("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d", pParentSql, pSql, - pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx); + tscDebug("0x%"PRIx64" sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d", + pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx); if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { - tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64, - pParentSql, pSql, tsMaxNumOfOrderedResults, num); + tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64, + pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num); 
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY); return; } @@ -2881,7 +2872,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR // no disk space for tmp directory if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) { - tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql, + tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self, tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace); tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE); return; @@ -2907,8 +2898,9 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, TSDB_SQL_SELECT, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pNew->cmd, 0); + pNew->cmd.active = pQueryInfo; pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; // clear the limit/offset info, since it should not be sent to vnode to be executed. @@ -2941,7 +2933,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SSqlObj* pParentSql = trsupport->pParentSql; SSqlObj* pSql = (SSqlObj *) tres; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0); assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); @@ -2950,8 +2942,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { // stable query killed or other subquery failed, all query stopped if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; - tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s", - pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code)); + tscError("0x%"PRIx64" query cancelled or failed, sub:0x%"PRIx64", vgId:%d, orderOfSub:%d, code:%s, global code:%s", + pParentSql->self, pSql->self, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code)); tscHandleSubqueryError(param, tres, code); return; @@ -2968,7 +2960,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { assert(code == taos_errno(pSql)); if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { - tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry); + tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry); int32_t sent = 0; @@ -2977,7 +2969,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { return; } } else { - tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code)); + tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code)); atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort } @@ -2985,8 +2977,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { return; } - tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, 
retrieve data", trsupport->pParentSql, pSql, - pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql->self, + pSql->self, pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex); if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode tscRetrieveFromDnodeCallBack(param, pSql, 0); @@ -2997,7 +2989,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) { if (pParentObj->retry > pParentObj->maxRetry) { - tscError("%p max retry reached, abort the retry effort", pParentObj); + tscError("0x%"PRIx64" max retry reached, abort the retry effort", pParentObj->self); return false; } @@ -3049,7 +3041,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) { - tscDebug("%p insert:%p,%d completed, total:%d", pParentObj, tres, pSupporter->index, pParentObj->subState.numOfSub); + tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, pSupporter->index, pParentObj->subState.numOfSub); return; } @@ -3059,7 +3051,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) doFreeInsertSupporter(pParentObj); if (pParentObj->res.code == TSDB_CODE_SUCCESS) { - tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); + tscDebug("0x%"PRIx64" Async insertion completed, total inserted:%d", pParentObj->self, pParentObj->res.numOfRows); // todo remove this parameter in async callback function definition. 
// all data has been sent to vnode, call user function @@ -3079,26 +3071,27 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) // clean up tableMeta in cache tscFreeQueryInfo(&pSql->cmd, false); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoS(&pSql->cmd, 0); STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0); tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); subquerySetState(pSql, &pParentObj->subState, i, 0); - tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql); + tscDebug("0x%"PRIx64", failed sub:%d, %p", pParentObj->self, i, pSql); } } - tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj, + tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self, pParentObj->res.numOfRows, numOfFailed, numOfSub); - tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables); + tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.numOfTables); for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name); taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } + pParentObj->res.code = TSDB_CODE_SUCCESS; pParentObj->cmd.parseFinished = false; tscResetSqlCmd(&pParentObj->cmd, false); @@ -3106,7 +3099,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) // in case of insert, redo parsing the sql string and build new submit data block for two reasons: // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly. // 2. vnode may need the schema information along with submit block to update its local table schema. - tscDebug("%p re-parse sql to generate submit data, retry:%d", pParentObj, pParentObj->retry); + tscDebug("0x%"PRIx64" re-parse sql to generate submit data, retry:%d", pParentObj->self, pParentObj->retry); pParentObj->retry++; int32_t code = tsParseSql(pParentObj, true); @@ -3118,7 +3111,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) return; } - tscDoQuery(pParentObj); + tscHandleMultivnodeInsert(pParentObj); } } @@ -3142,7 +3135,7 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) { return code; // here the pSql may have been released already. 
} - return tscProcessSql(pSql); + return tscBuildAndSendRequest(pSql, NULL); } int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { @@ -3158,7 +3151,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { pSup->pSql = pSql; pSub->param = pSup; - tscDebug("%p sub:%p launch sub insert, orderOfSub:%d", pSql, pSub, i); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch sub insert, orderOfSub:%d", pSql->self, pSub->self, i); if (pSub->res.code != TSDB_CODE_SUCCESS) { tscHandleInsertRetry(pSql, pSub); } @@ -3186,14 +3179,14 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { } memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub); - tscDebug("%p reset all sub states to 0", pSql); + tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); if (pSql->pSubs == NULL) { goto _error; } - tscDebug("%p submit data to %d vnode(s)", pSql, pSql->subState.numOfSub); + tscDebug("0x%"PRIx64" submit data to %d vnode(s)", pSql->self, pSql->subState.numOfSub); while(numOfSub < pSql->subState.numOfSub) { SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter)); @@ -3206,7 +3199,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT); if (pNew == NULL) { - tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, numOfSub, strerror(errno)); + tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, numOfSub, strerror(errno)); goto _error; } @@ -3220,17 +3213,17 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, numOfSub); pRes->code = tscCopyDataBlockToPayload(pNew, pTableDataBlock); if (pRes->code == TSDB_CODE_SUCCESS) { - tscDebug("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, numOfSub); + tscDebug("0x%"PRIx64" sub:%p create subObj success. 
orderOfSub:%d", pSql->self, pNew, numOfSub); numOfSub++; } else { - tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%s", pSql, numOfSub, + tscDebug("0x%"PRIx64" prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%s", pSql->self, numOfSub, pSql->subState.numOfSub, tstrerror(pRes->code)); goto _error; } } if (numOfSub < pSql->subState.numOfSub) { - tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql); + tscError("0x%"PRIx64" failed to prepare subObj structure and launch sub-insertion", pSql->self); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } @@ -3240,8 +3233,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { // use the local variable for (int32_t j = 0; j < numOfSub; ++j) { SSqlObj *pSub = pSql->pSubs[j]; - tscDebug("%p sub:%p launch sub insert, orderOfSub:%d", pSql, pSub, j); - tscProcessSql(pSub); + tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, j); + tscBuildAndSendRequest(pSub, NULL); } return TSDB_CODE_SUCCESS; @@ -3251,21 +3244,23 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { } static char* getResultBlockPosition(SSqlCmd* pCmd, SSqlRes* pRes, int32_t columnIndex, int16_t* bytes) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, columnIndex); - assert(pInfo->pSqlExpr != NULL); + assert(pInfo->pExpr->pExpr == NULL); - *bytes = pInfo->pSqlExpr->resBytes; - char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + pRes->row * (*bytes); - - return pData; + *bytes = pInfo->pExpr->base.resBytes; + if (pRes->data != NULL) { + return pRes->data + pInfo->pExpr->base.offset * pRes->numOfRows + pRes->row * (*bytes); + } else { + return ((char*)pRes->urow[columnIndex]) + pRes->row * (*bytes); + } } static void doBuildResFromSubqueries(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); int32_t numOfRes = INT32_MAX; for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { @@ -3283,6 +3278,7 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { return; } + tscRestoreFuncForSTableQuery(pQueryInfo); int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList); assert(numOfRes * rowSize > 0); @@ -3302,6 +3298,19 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { int16_t bytes = 0; + tscRestoreFuncForSTableQuery(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + SQueryInfo* pSubQueryInfo = pSub->cmd.pQueryInfo[0]; + tscRestoreFuncForSTableQuery(pSubQueryInfo); + tscFieldInfoUpdateOffset(pSubQueryInfo); + } + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for(int32_t i = 0; i < numOfExprs; ++i) { SColumnIndex* pIndex = &pRes->pColumnIndex[i]; @@ -3348,7 +3357,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } if (pRes->tsrow == NULL) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); pRes->numOfCols = (int16_t) tscSqlExprNumOfExprs(pQueryInfo); pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES); @@ -3378,18 +3387,18 @@ char 
*getArithmeticInputSrc(void *param, const char *name, int32_t colId) { SArithmeticSupport *pSupport = (SArithmeticSupport *) param; int32_t index = -1; - SSqlExpr* pExpr = NULL; + SExprInfo* pExpr = NULL; for (int32_t i = 0; i < pSupport->numOfCols; ++i) { pExpr = taosArrayGetP(pSupport->exprList, i); - if (strncmp(name, pExpr->aliasName, sizeof(pExpr->aliasName) - 1) == 0) { + if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) { index = i; break; } } assert(index >= 0 && index < pSupport->numOfCols); - return pSupport->data[index] + pSupport->offset * pExpr->resBytes; + return pSupport->data[index] + pSupport->offset * pExpr->base.resBytes; } TAOS_ROW doSetResultRowData(SSqlObj *pSql) { @@ -3402,23 +3411,29 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) { return pRes->tsrow; } - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); size_t size = tscNumOfFields(pQueryInfo); + + int32_t j = 0; for (int i = 0; i < size; ++i) { SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + if (!pInfo->visible) { + continue; + } int32_t type = pInfo->field.type; int32_t bytes = pInfo->field.bytes; if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { - pRes->tsrow[i] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i]; + pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i]; } else { - pRes->tsrow[i] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]); - pRes->length[i] = varDataLen(pRes->urow[i]); + pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]); + pRes->length[j] = varDataLen(pRes->urow[i]); } ((char**) pRes->urow)[i] += bytes; + j += 1; } pRes->row++; // index increase one-step @@ -3429,7 +3444,7 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) { bool hasData = true; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { bool allSubqueryExhausted = true; @@ -3441,7 +3456,7 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) { SSqlRes *pRes1 = &pSql->pSubs[i]->res; SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd; - SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex); + SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd1, pCmd1->clauseIndex); assert(pQueryInfo1->numOfTables == 1); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0); @@ -3465,10 +3480,11 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) { } SSqlRes * pRes1 = &pSql->pSubs[i]->res; - SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); + SQueryInfo *pQueryInfo1 = tscGetQueryInfo(&pSql->pSubs[i]->cmd, 0); if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) && - tscIsProjectionQuery(pQueryInfo1)) || (pRes1->numOfRows == 0)) { + tscIsProjectionQuery(pQueryInfo1)) || + (pRes1->numOfRows == 0)) { hasData = false; break; } @@ -3477,3 +3493,118 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) { return hasData; } + +void* createQueryInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, + SOperatorInfo* pSourceOperator, char* sql, void* merger, int32_t stage) { + assert(pQueryInfo != NULL); + int16_t numOfOutput = 
pQueryInfo->fieldsInfo.numOfOutput; + + SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo)); + if (pQInfo == NULL) { + goto _cleanup; + } + + // to make sure third party won't overwrite this structure + pQInfo->signature = pQInfo; + + SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; + SQueryAttr *pQueryAttr = &pQInfo->query; + + pRuntimeEnv->pQueryAttr = pQueryAttr; + tscCreateQueryFromQueryInfo(pQueryInfo, pQueryAttr, NULL); + + pQueryAttr->tableGroupInfo = *pTableGroupInfo; + + // calculate the result row size + for (int16_t col = 0; col < numOfOutput; ++col) { + assert(pExprs[col].base.resBytes > 0); + pQueryAttr->resultRowSize += pExprs[col].base.resBytes; + + // keep the tag length + if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) { + pQueryAttr->tagLen += pExprs[col].base.resBytes; + } + } + + size_t numOfGroups = 0; + if (pTableGroupInfo->pGroupList != NULL) { + numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList); + STableGroupInfo* pTableqinfo = &pQInfo->runtimeEnv.tableqinfoGroupInfo; + + pTableqinfo->pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); + pTableqinfo->numOfTables = pTableGroupInfo->numOfTables; + pTableqinfo->map = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + } + + pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo)); + if (pQInfo->pBuf == NULL) { + goto _cleanup; + } + + pQInfo->dataReady = QUERY_RESULT_NOT_READY; + pQInfo->rspContext = NULL; + pQInfo->sql = sql; + + pthread_mutex_init(&pQInfo->lock, NULL); + tsem_init(&pQInfo->ready, 0, 0); + + int32_t index = 0; + for(int32_t i = 0; i < numOfGroups; ++i) { + SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); + + size_t s = taosArrayGetSize(pa); + SArray* p1 = taosArrayInit(s, POINTER_BYTES); + if (p1 == NULL) { + goto _cleanup; + } + + taosArrayPush(pRuntimeEnv->tableqinfoGroupInfo.pGroupList, &p1); + + STimeWindow window = pQueryAttr->window; + for(int32_t j = 0; j < s; ++j) { + STableKeyInfo* info = taosArrayGet(pa, j); + window.skey = info->lastKey; + + void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); + STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf); + if (item == NULL) { + goto _cleanup; + } + + item->groupIndex = i; + taosArrayPush(p1, &item); + + STableId id = {.tid = 0, .uid = 0}; + taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES); + index += 1; + } + } + + for (int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExprInfo = &pExprs[i]; + if (pExprInfo->pExpr != NULL) { + tExprTreeDestroy(pExprInfo->pExpr, NULL); + pExprInfo->pExpr = NULL; + } + } + + tfree(pExprs); + + SArray* pa = NULL; + if (stage == MASTER_SCAN) { + pa = createExecOperatorPlan(pQueryAttr); + } else { + pa = createGlobalMergePlan(pQueryAttr); + } + + STsBufInfo bufInfo = {0}; + SQueryParam param = {.pOperator = pa}; + /*int32_t code = */initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger); + taosArrayDestroy(pa); + + return pQInfo; + + _cleanup: + freeQInfo(pQInfo); + return NULL; +} diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4e8bfdf06481d5026bc935e3e1445a419694b3cd..420b78f64dccfa6bac9302dc2c04c2ccd9ce44e5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -81,11 +81,11 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) { int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i <
numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - int32_t functId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + int32_t functId = pExpr->base.functionId; // "select count(tbname)" query - if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { + if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { continue; } @@ -101,10 +101,9 @@ bool tscQueryBlockInfo(SQueryInfo* pQueryInfo) { int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - int32_t functId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + int32_t functId = pExpr->base.functionId; - // "select count(tbname)" query if (functId == TSDB_FUNC_BLKINFO) { return true; } @@ -154,7 +153,7 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { } for (int32_t i = 0; i < numOfExprs; ++i) { - int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->base.functionId; if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && @@ -193,7 +192,7 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->base.functionId; if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) { @@ -207,11 +206,11 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); assert(pExpr != NULL); - int32_t functionId = pExpr->functionId; - if (functionId == TSDB_FUNC_TAG) { + int32_t functionId = pExpr->base.functionId; + if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) { continue; } @@ -230,8 +229,8 @@ bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) { size_t numOfOutput = tscNumOfFields(pQueryInfo); for(int32_t i = 0; i < numOfOutput; ++i) { - SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo; - if (pExprInfo != NULL) { + SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pExpr; + if (pExprInfo->pExpr != NULL) { return true; } } @@ -254,16 +253,103 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) { return false; } +bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TS) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return true; + } + } + + return false; +} + +bool isTsCompQuery(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + SExprInfo* pExpr1 = tscSqlExprGet(pQueryInfo, 0); + if (numOfExprs != 1) { + return false; + } + + return pExpr1->base.functionId == TSDB_FUNC_TS_COMP; +} + +bool hasTagValOutput(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + 
SExprInfo* pExpr1 = tscSqlExprGet(pQueryInfo, 0); + + if (numOfExprs == 1 && pExpr1->base.functionId == TSDB_FUNC_TS_COMP) { + return true; + } + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + // ts_comp column required the tag value for join filter + if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { + return true; + } + } + + return false; +} + +bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + int32_t functionId = pExpr->base.functionId; + if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP) { + return true; + } + } + + return false; +} + +bool isStabledev(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + int32_t functionId = pExpr->base.functionId; + if (functionId == TSDB_FUNC_STDDEV_DST) { + return true; + } + } + + return false; +} + bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { continue; } - int32_t functionId = pExpr->functionId; - if (functionId == TSDB_FUNC_TWA) { + if (pExpr->base.functionId == TSDB_FUNC_TWA) { return true; } } @@ -271,40 +357,70 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { return false; } -bool tscIsTopbotQuery(SQueryInfo* pQueryInfo) { +bool tscNeedReverseScan(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { continue; } - int32_t functionId = pExpr->functionId; - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { + int32_t functionId = pExpr->base.functionId; + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { + continue; + } + + if ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && pQueryInfo->order.order == TSDB_ORDER_DESC) { return true; } + + if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) { + // the scan order to acquire the last result of the specified column + int32_t order = (int32_t)pExpr->base.param[0].i64; + if (order != pQueryInfo->order.order) { + return true; + } + } } return false; } -int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo) { +bool isSimpleAggregate(SQueryInfo* pQueryInfo) { + if (pQueryInfo->interval.interval > 0) { + return false; + } + + // Note:top/bottom query is fixed output query + if (tscIsTopBotQuery(pQueryInfo) || tscGroupbyColumn(pQueryInfo) || isTsCompQuery(pQueryInfo)) { + return true; + } + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { continue; } - int32_t functionId = pExpr->functionId; - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { - return (int32_t) pExpr->param[0].i64; + int32_t functionId = pExpr->base.functionId; + if 
(functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) { + continue; + } + + if (!IS_MULTIOUTPUT(aAggs[functionId].status)) { + return true; } } - return 0; + return false; } +bool isBlockDistQuery(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, 0); + return (numOfExprs == 1 && pExpr->base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX); +} void tscClearInterpInfo(SQueryInfo* pQueryInfo) { if (!tscIsPointInterpQuery(pQueryInfo)) { @@ -353,23 +469,23 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) { offset += pInfo->field.bytes; // generated the user-defined column result - if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { - if (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) { + if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) { + if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) { setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows); } else { if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) { - assert(pInfo->pSqlExpr->param[1].nLen <= pInfo->field.bytes); + assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes); for (int32_t k = 0; k < pRes->numOfRows; ++k) { char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; - memcpy(varDataVal(p), pInfo->pSqlExpr->param[1].pz, pInfo->pSqlExpr->param[1].nLen); - varDataSetLen(p, pInfo->pSqlExpr->param[1].nLen); + memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen); + varDataSetLen(p, pInfo->pExpr->base.param[1].nLen); } } else { for (int32_t k = 0; k < pRes->numOfRows; ++k) { char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; - memcpy(p, &pInfo->pSqlExpr->param[1].i64, pInfo->field.bytes); + memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes); } } } @@ -406,6 +522,216 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) { } } +void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock) { + assert(pRes->numOfCols > 0); + + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i); + + pRes->urow[i] = pColData->pData; + pRes->length[i] = pInfo->field.bytes; + + // generated the user-defined column result + if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) { + if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) { + setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows); + } else { + if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) { + assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes); + + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; + + memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen); + varDataSetLen(p, pInfo->pExpr->base.param[1].nLen); + } + } else { + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; + memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes); + } + } + } + + } else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { + // convert unicode to native code in a temporary buffer extra 
one byte for terminated symbol + pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + + // string terminated char for binary data + memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); + + char* p = pRes->urow[i]; + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* dst = pRes->buffer[i] + k * pInfo->field.bytes; + + if (isNull(p, TSDB_DATA_TYPE_NCHAR)) { + memcpy(dst, p, varDataTLen(p)); + } else if (varDataLen(p) > 0) { + int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst)); + varDataSetLen(dst, length); + + if (length == 0) { + tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); + } + } else { + varDataSetLen(dst, 0); + } + + p += pInfo->field.bytes; + } + + memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + } + } +} + +static SColumnInfo* extractColumnInfoFromResult(STableMeta* pTableMeta, SArray* pTableCols) { + int32_t numOfCols = (int32_t) taosArrayGetSize(pTableCols); + SColumnInfo* pColInfo = calloc(numOfCols, sizeof(SColumnInfo)); + + SSchema *pSchema = pTableMeta->schema; + for(int32_t i = 0; i < numOfCols; ++i) { + SColumn* pCol = taosArrayGetP(pTableCols, i); + int32_t index = pCol->columnIndex; + + pColInfo[i].type = pSchema[index].type; + pColInfo[i].bytes = pSchema[index].bytes; + pColInfo[i].colId = pSchema[index].colId; + } + + return pColInfo; +} + +typedef struct SDummyInputInfo { + SSDataBlock *block; + SSqlRes *pRes; // refactor: remove it +} SDummyInputInfo; + +SSDataBlock* doGetDataBlock(void* param, bool* newgroup) { + SOperatorInfo *pOperator = (SOperatorInfo*) param; + + SDummyInputInfo *pInput = pOperator->info; + char* pData = pInput->pRes->data; + + SSDataBlock* pBlock = pInput->block; + pBlock->info.rows = pInput->pRes->numOfRows; + if (pBlock->info.rows == 0) { + return NULL; + } + + //TODO refactor + int32_t offset = 0; + for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i); + if (pData != NULL) { + pColData->pData = pData + offset * pBlock->info.rows; + } else { + pColData->pData = pInput->pRes->urow[i]; + } + + offset += pColData->info.bytes; + } + + pInput->pRes->numOfRows = 0; + *newgroup = false; + return pBlock; +} + +static void destroyDummyInputOperator(void* param, int32_t numOfOutput) { + SDummyInputInfo* pInfo = (SDummyInputInfo*) param; + + // tricky + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pInfo->block->pDataBlock, i); + pColInfoData->pData = NULL; + } + + pInfo->block = destroyOutputBuf(pInfo->block); + pInfo->pRes = NULL; +} + +// todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later +SOperatorInfo* createDummyInputOperator(char* pResult, SSchema* pSchema, int32_t numOfCols) { + assert(numOfCols > 0); + SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo)); + + pInfo->pRes = (SSqlRes*) pResult; + + pInfo->block = calloc(numOfCols, sizeof(SSDataBlock)); + pInfo->block->info.numOfCols = numOfCols; + + pInfo->block->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData colData = {{0}}; + colData.info.bytes = pSchema[i].bytes; + colData.info.type = pSchema[i].type; + colData.info.colId = pSchema[i].colId; + + taosArrayPush(pInfo->block->pDataBlock, &colData); + } + + SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo)); + pOptr->name = "DummyInputOperator"; + 
pOptr->operatorType = OP_DummyInput; + pOptr->numOfOutput = numOfCols; + pOptr->blockingOptr = false; + pOptr->info = pInfo; + pOptr->exec = doGetDataBlock; + pOptr->cleanup = destroyDummyInputOperator; + return pOptr; +} + +void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo) { + // set the correct result + SSDataBlock* p = pQueryInfo->pQInfo->runtimeEnv.outputBuf; + pRes->numOfRows = (p != NULL)? p->info.rows: 0; + + if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { + tscCreateResPointerInfo(pRes, pQueryInfo); + tscSetResRawPtrRv(pRes, pQueryInfo, p); + } + + pRes->row = 0; + pRes->completed = (pRes->numOfRows == 0); +} + +void handleDownstreamOperator(SSqlRes* pRes, SQueryInfo* pQueryInfo) { + if (pQueryInfo->pDownstream != NULL) { + // handle the following query process + SQueryInfo *px = pQueryInfo->pDownstream; + SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->pTableMetaInfo[0]->pTableMeta, px->colList); + int32_t numOfOutput = (int32_t) tscSqlExprNumOfExprs(px); + + int32_t numOfCols = (int32_t) taosArrayGetSize(px->colList); + SQueriedTableInfo info = {.colList = pColumnInfo, .numOfCols = numOfCols,}; + SSchema* pSchema = tscGetTableSchema(px->pTableMetaInfo[0]->pTableMeta); + + STableGroupInfo tableGroupInfo = {.numOfTables = 1, .pGroupList = taosArrayInit(1, POINTER_BYTES),}; + tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + + STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN}; + + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); + taosArrayPush(group, &tableKeyInfo); + + taosArrayPush(tableGroupInfo.pGroupList, &group); + + SOperatorInfo* pSourceOptr = createDummyInputOperator((char*)pRes, pSchema, numOfCols); + + SExprInfo *exprInfo = NULL; + /*int32_t code = */createQueryFunc(&info, numOfOutput, &exprInfo, px->exprList->pData, NULL, px->type, NULL); + px->pQInfo = createQueryInfoFromQueryNode(px, exprInfo, &tableGroupInfo, pSourceOptr, NULL, NULL, MASTER_SCAN); + + uint64_t qId = 0; + qTableQuery(px->pQInfo, &qId); + convertQueryResult(pRes, px); + + tfree(pColumnInfo); + } +} + static void tscDestroyResPointerInfo(SSqlRes* pRes) { if (pRes->buffer != NULL) { // free all buffers containing the multibyte string for (int i = 0; i < pRes->numOfCols; i++) { @@ -431,7 +757,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { } tfree(pRes->final); - + pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } @@ -441,10 +767,29 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) { } for (int32_t i = 0; i < pCmd->numOfClause; ++i) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, i); + + // recursive call it + if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, 0); + freeQueryInfoImpl(pUp); + clearAllTableMetaInfo(pUp, removeMeta); + if (pUp->pQInfo != NULL) { + qDestroyQueryInfo(pUp->pQInfo); + pUp->pQInfo = NULL; + } + + tfree(pUp); + } freeQueryInfoImpl(pQueryInfo); clearAllTableMetaInfo(pQueryInfo, removeMeta); + + if (pQueryInfo->pQInfo != NULL) { + qDestroyQueryInfo(pQueryInfo->pQInfo); + pQueryInfo->pQInfo = NULL; + } + tfree(pQueryInfo); } @@ -487,7 +832,7 @@ void tscFreeSqlResult(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; tscDestroyResPointerInfo(pRes); - + memset(&pSql->res, 0, sizeof(SSqlRes)); } @@ -496,10 +841,10 @@ void tscFreeSubobj(SSqlObj* pSql) { return; } - tscDebug("%p start to free sub SqlObj, 
numOfSub:%d", pSql, pSql->subState.numOfSub); + tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub); for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - tscDebug("%p free sub SqlObj:%p, index:%d", pSql, pSql->pSubs[i], i); + tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i); taos_free_result(pSql->pSubs[i]); pSql->pSubs[i] = NULL; } @@ -530,7 +875,7 @@ void tscFreeRegisteredSqlObj(void *pSql) { int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1); int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1); - tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total); + tscDebug("0x%"PRIx64" free SqlObj, total in tscObj:%d, total:%d", p->self, num, total); tscFreeSqlObj(p); taosReleaseRef(tscRefId, pTscObj->rid); @@ -553,7 +898,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { return; } - tscDebug("%p start to free sqlObj", pSql); + tscDebug("0x%"PRIx64" start to free sqlObj", pSql->self); pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; @@ -719,6 +1064,10 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { return TSDB_CODE_SUCCESS; } +SQueryInfo* tscGetActiveQueryInfo(SSqlCmd* pCmd) { + return pCmd->active; +} + /** * create the in-memory buffer for each table to keep the submitted data block * @param initialSize @@ -913,7 +1262,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); if (ret != TSDB_CODE_SUCCESS) { - tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); + tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); return ret; @@ -932,7 +1281,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { dataBuf->pData = tmp; memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); } else { // failed to allocate memory, free already allocated memory and return error code - tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize); + tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); @@ -945,7 +1294,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { tscSortRemoveDataBlockDupRows(pOneTableBlock); char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); - tscDebug("%p name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, tNameGetTableName(&pOneTableBlock->tableName), + tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); @@ -1043,12 +1392,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { assert(pFieldInfo != NULL); pFieldInfo->numOfOutput++; - struct SInternalField info = 
{ - .pSqlExpr = NULL, - .pArithExprInfo = NULL, - .visible = true, - .pFieldFilters = NULL, - }; + struct SInternalField info = { .pExpr = NULL, .visible = true }; info.field = *pField; return taosArrayPush(pFieldInfo->internalField, &info); @@ -1056,12 +1400,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { pFieldInfo->numOfOutput++; - struct SInternalField info = { - .pSqlExpr = NULL, - .pArithExprInfo = NULL, - .visible = true, - .pFieldFilters = NULL, - }; + struct SInternalField info = { .pExpr = NULL, .visible = true }; info.field = *field; return taosArrayInsert(pFieldInfo->internalField, index, &info); @@ -1070,14 +1409,14 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); - pExpr->offset = 0; + SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); + pExpr->base.offset = 0; for (int32_t i = 1; i < numOfExprs; ++i) { - SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); - SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i); + SExprInfo* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); + SExprInfo* p = taosArrayGetP(pQueryInfo->exprList, i); - p->offset = prev->offset + prev->resBytes; + p->base.offset = prev->base.offset + prev->base.resBytes; } } @@ -1093,9 +1432,9 @@ TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index); - assert(pInfo != NULL && pInfo->pSqlExpr != NULL); + assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL); - return pInfo->pSqlExpr->offset; + return pInfo->pExpr->base.offset; } int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) { @@ -1127,141 +1466,162 @@ int32_t tscGetResRowLength(SArray* pExprList) { int32_t size = 0; for(int32_t i = 0; i < num; ++i) { - SSqlExpr* pExpr = taosArrayGetP(pExprList, i); - size += pExpr->resBytes; + SExprInfo* pExpr = taosArrayGetP(pExprList, i); + size += pExpr->base.resBytes; } return size; } -static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { - for(int32_t i = 0; i < numOfFilters; ++i) { - if (pFilterInfo[i].filterstr) { - tfree(pFilterInfo[i].pz); +static void destroyFilterInfo(SColumnFilterList* pFilterList) { + for(int32_t i = 0; i < pFilterList->numOfFilters; ++i) { + if (pFilterList->filterInfo[i].filterstr) { + tfree(pFilterList->filterInfo[i].pz); } } - - tfree(pFilterInfo); -} -static void tscColumnDestroy(SColumn* pCol) { - destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters); - free(pCol); + tfree(pFilterList->filterInfo); + pFilterList->numOfFilters = 0; } +void* sqlExprDestroy(SExprInfo* pExpr) { + if (pExpr == NULL) { + return NULL; + } + + SSqlExpr* p = &pExpr->base; + for(int32_t i = 0; i < tListLen(p->param); ++i) { + tVariantDestroy(&p->param[i]); + } + + if (p->flist.numOfFilters > 0) { + tfree(p->flist.filterInfo); + } + + if (pExpr->pExpr != NULL) { + tExprTreeDestroy(pExpr->pExpr, NULL); + } + + tfree(pExpr); + return NULL; +} void tscFieldInfoClear(SFieldInfo* pFieldInfo) { if (pFieldInfo == NULL) { return; } - for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) { - SInternalField* pInfo = 
taosArrayGet(pFieldInfo->internalField, i); - - if (pInfo->pArithExprInfo != NULL) { - tExprTreeDestroy(pInfo->pArithExprInfo->pExpr, NULL); - - SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; - for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) { - if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { - tfree(pFuncMsg->arg[j].argValue.pz); - } + if (pFieldInfo->internalField != NULL) { + size_t num = taosArrayGetSize(pFieldInfo->internalField); + for (int32_t i = 0; i < num; ++i) { + SInternalField* pfield = taosArrayGet(pFieldInfo->internalField, i); + if (pfield->pExpr != NULL && pfield->pExpr->pExpr != NULL) { + sqlExprDestroy(pfield->pExpr); } - - tfree(pInfo->pArithExprInfo); - } - - if (pInfo->pFieldFilters != NULL) { - tscColumnDestroy(pInfo->pFieldFilters->pFilters); - tfree(pInfo->pFieldFilters); } } - + taosArrayDestroy(pFieldInfo->internalField); + tfree(pFieldInfo->final); memset(pFieldInfo, 0, sizeof(SFieldInfo)); } -static SSqlExpr* doCreateSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { +static SExprInfo* doCreateSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, + int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); - SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr)); + SExprInfo* pExpr = calloc(1, sizeof(SExprInfo)); if (pExpr == NULL) { return NULL; } - pExpr->functionId = functionId; + SSqlExpr* p = &pExpr->base; + p->functionId = functionId; // set the correct columnIndex index if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX; + SSchema* s = tGetTbnameColumnSchema(); + p->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX; + p->colBytes = s->bytes; + p->colType = s->type; } else if (pColIndex->columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) { - pExpr->colInfo.colId = TSDB_BLOCK_DIST_COLUMN_INDEX; + SSchema s = tGetBlockDistColumnSchema(); + + p->colInfo.colId = TSDB_BLOCK_DIST_COLUMN_INDEX; + p->colBytes = s.bytes; + p->colType = s.type; } else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) { - pExpr->colInfo.colId = pColIndex->columnIndex; + p->colInfo.colId = pColIndex->columnIndex; + p->colBytes = size; + p->colType = type; } else { if (TSDB_COL_IS_TAG(colType)) { SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId; - tstrncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(pExpr->colInfo.name)); + p->colInfo.colId = pSchema[pColIndex->columnIndex].colId; + p->colBytes = pSchema[pColIndex->columnIndex].bytes; + p->colType = pSchema[pColIndex->columnIndex].type; + tstrncpy(p->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(p->colInfo.name)); } else if (pTableMetaInfo->pTableMeta != NULL) { // in handling select database/version/server_status(), the pTableMeta is NULL SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex); - pExpr->colInfo.colId = pSchema->colId; - tstrncpy(pExpr->colInfo.name, pSchema->name, sizeof(pExpr->colInfo.name)); + p->colInfo.colId = pSchema->colId; + p->colBytes = pSchema->bytes; + p->colType = pSchema->type; + tstrncpy(p->colInfo.name, pSchema->name, sizeof(p->colInfo.name)); } } - pExpr->colInfo.flag = colType; - pExpr->colInfo.colIndex = pColIndex->columnIndex; + p->colInfo.flag = 
colType; + p->colInfo.colIndex = pColIndex->columnIndex; - pExpr->resType = type; - pExpr->resBytes = size; - pExpr->resColId = resColId; - pExpr->interBytes = interSize; + p->resType = type; + p->resBytes = size; + p->resColId = resColId; + p->interBytes = interSize; if (pTableMetaInfo->pTableMeta) { - pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; + p->uid = pTableMetaInfo->pTableMeta->id.uid; } return pExpr; } -SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList); if (index == num) { return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } - SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + SExprInfo* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayInsert(pQueryInfo->exprList, index, &pExpr); return pExpr; } -SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { - SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + SExprInfo* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayPush(pQueryInfo->exprList, &pExpr); return pExpr; } -SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, +SExprInfo* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, index); + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, index); if (pExpr == NULL) { return NULL; } - pExpr->functionId = functionId; + SSqlExpr* pse = &pExpr->base; + pse->functionId = functionId; - pExpr->colInfo.colIndex = srcColumnIndex; - pExpr->colInfo.colId = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, srcColumnIndex)->colId; + pse->colInfo.colIndex = srcColumnIndex; + pse->colInfo.colId = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, srcColumnIndex)->colId; - pExpr->resType = type; - pExpr->resBytes = size; + pse->resType = type; + pse->resBytes = size; return pExpr; } @@ -1273,8 +1633,8 @@ bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) { int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); for(int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) { return true; } } @@ -1297,32 +1657,18 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) assert(pExpr->numOfParams <= 3); } -SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index) { +SExprInfo* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index) { return taosArrayGetP(pQueryInfo->exprList, index); } -void* sqlExprDestroy(SSqlExpr* 
pExpr) { - if (pExpr == NULL) { - return NULL; - } - - for(int32_t i = 0; i < tListLen(pExpr->param); ++i) { - tVariantDestroy(&pExpr->param[i]); - } - - tfree(pExpr); - - return NULL; -} - /* - * NOTE: Does not release SSqlExprInfo here. + * NOTE: Does not release SExprInfo here. */ void tscSqlExprInfoDestroy(SArray* pExprInfo) { size_t size = taosArrayGetSize(pExprInfo); for(int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = taosArrayGetP(pExprInfo, i); + SExprInfo* pExpr = taosArrayGetP(pExprInfo, i); sqlExprDestroy(pExpr); } @@ -1334,46 +1680,36 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco size_t size = taosArrayGetSize(src); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = taosArrayGetP(src, i); + SExprInfo* pExpr = taosArrayGetP(src, i); - if (pExpr->uid == uid) { - + if (pExpr->base.uid == uid) { if (deepcopy) { - SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr)); - if (p1 == NULL) { - return -1; - } + SExprInfo* p1 = calloc(1, sizeof(SExprInfo)); + tscSqlExprAssign(p1, pExpr); - *p1 = *pExpr; - memset(p1->param, 0, sizeof(tVariant) * tListLen(p1->param)); - - for (int32_t j = 0; j < pExpr->numOfParams; ++j) { - tVariantAssign(&p1->param[j], &pExpr->param[j]); - } - taosArrayPush(dst, &p1); } else { taosArrayPush(dst, &pExpr); } + } } return 0; } -bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) { +bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) { // ignore the tbname columnIndex to be inserted into source list - if (pColIndex->columnIndex < 0) { + if (columnIndex < 0) { return false; } - + size_t numOfCols = taosArrayGetSize(pColumnList); - int16_t col = pColIndex->columnIndex; int32_t i = 0; while (i < numOfCols) { SColumn* pCol = taosArrayGetP(pColumnList, i); - if ((pCol->colIndex.columnIndex != col) || (pCol->colIndex.tableIndex != pColIndex->tableIndex)) { + if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) { ++i; continue; } else { @@ -1388,22 +1724,38 @@ bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) { return true; } +void tscSqlExprAssign(SExprInfo* dst, const SExprInfo* src) { + assert(dst != NULL && src != NULL); + + *dst = *src; -SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { + if (src->base.flist.numOfFilters > 0) { + dst->base.flist.filterInfo = calloc(src->base.flist.numOfFilters, sizeof(SColumnFilterInfo)); + memcpy(dst->base.flist.filterInfo, src->base.flist.filterInfo, sizeof(SColumnFilterInfo) * src->base.flist.numOfFilters); + } + + dst->pExpr = exprdup(src->pExpr); + + memset(dst->base.param, 0, sizeof(tVariant) * tListLen(dst->base.param)); + for (int32_t j = 0; j < src->base.numOfParams; ++j) { + tVariantAssign(&dst->base.param[j], &src->base.param[j]); + } +} + +SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema) { // ignore the tbname columnIndex to be inserted into source list - if (pColIndex->columnIndex < 0) { + if (columnIndex < 0) { return NULL; } size_t numOfCols = taosArrayGetSize(pColumnList); - int16_t col = pColIndex->columnIndex; int32_t i = 0; while (i < numOfCols) { SColumn* pCol = taosArrayGetP(pColumnList, i); - if (pCol->colIndex.columnIndex < col) { + if (pCol->columnIndex < columnIndex) { i++; - } else if (pCol->colIndex.tableIndex < pColIndex->tableIndex) { + } else if (pCol->tableUid < uid) { i++; } else { break; @@ -1416,18 +1768,28 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { return NULL; } - 
b->colIndex = *pColIndex; + b->columnIndex = columnIndex; + b->tableUid = uid; + b->info.colId = pSchema->colId; + b->info.bytes = pSchema->bytes; + b->info.type = pSchema->type; + taosArrayInsert(pColumnList, i, &b); } else { SColumn* pCol = taosArrayGetP(pColumnList, i); - if (i < numOfCols && (pCol->colIndex.columnIndex > col || pCol->colIndex.tableIndex != pColIndex->tableIndex)) { + if (i < numOfCols && (pCol->columnIndex > columnIndex || pCol->tableUid != uid)) { SColumn* b = calloc(1, sizeof(SColumn)); if (b == NULL) { return NULL; } - b->colIndex = *pColIndex; + b->columnIndex = columnIndex; + b->tableUid = uid; + b->info.colId = pSchema->colId; + b->info.bytes = pSchema->bytes; + b->info.type = pSchema->type; + taosArrayInsert(pColumnList, i, &b); } } @@ -1445,22 +1807,29 @@ SColumn* tscColumnClone(const SColumn* src) { return NULL; } - dst->colIndex = src->colIndex; - dst->numOfFilters = src->numOfFilters; - dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters); - + dst->columnIndex = src->columnIndex; + dst->tableUid = src->tableUid; + dst->info.flist.numOfFilters = src->info.flist.numOfFilters; + dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters); + dst->info.type = src->info.type; + dst->info.colId = src->info.colId; + dst->info.bytes = src->info.bytes; return dst; } +static void tscColumnDestroy(SColumn* pCol) { + destroyFilterInfo(&pCol->info.flist); + free(pCol); +} -void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) { +void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) { assert(src != NULL && dst != NULL); size_t num = taosArrayGetSize(src); for (int32_t i = 0; i < num; ++i) { SColumn* pCol = taosArrayGetP(src, i); - if (pCol->colIndex.tableIndex == tableIndex || tableIndex < 0) { + if (pCol->tableUid == tableUid) { SColumn* p = tscColumnClone(pCol); taosArrayPush(dst, &p); } @@ -1682,7 +2051,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { dest->tbnameCond.len = src->tbnameCond.len; dest->joinInfo.hasJoin = src->joinInfo.hasJoin; - + for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) { if (src->joinInfo.joinTables[i]) { dest->joinInfo.joinTables[i] = calloc(1, sizeof(SJoinNode)); @@ -1699,7 +2068,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { } } - + dest->relType = src->relType; if (src->pCond == NULL) { @@ -1771,16 +2140,16 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - pColInfo[i].functionId = pExpr->functionId; + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + pColInfo[i].functionId = pExpr->base.functionId; - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - int16_t index = pExpr->colInfo.colIndex; + int16_t index = pExpr->base.colInfo.colIndex; pColInfo[i].type = (index != -1) ? 
pTagSchema[index].type : TSDB_DATA_TYPE_BINARY; } else { - pColInfo[i].type = pSchema[pExpr->colInfo.colIndex].type; + pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type; } } } @@ -1830,7 +2199,7 @@ STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd* pCmd, int32_t clauseIndex, i assert(clauseIndex >= 0 && clauseIndex < pCmd->numOfClause); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, clauseIndex); return tscGetMetaInfo(pQueryInfo, tableIndex); } @@ -1847,17 +2216,17 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) { return pQueryInfo->pTableMetaInfo[tableIndex]; } -SQueryInfo* tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); +SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd, int32_t subClauseIndex) { + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, subClauseIndex); int32_t ret = TSDB_CODE_SUCCESS; while ((pQueryInfo) == NULL) { - if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) { + if ((ret = tscAddQueryInfo(pCmd)) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; return NULL; } - pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); + pQueryInfo = tscGetQueryInfo(pCmd, subClauseIndex); } return pQueryInfo; @@ -1886,18 +2255,20 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField)); assert(pQueryInfo->exprList == NULL); - pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); - pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); - pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX; - pQueryInfo->resColumnId = -1000; - pQueryInfo->limit.limit = -1; - pQueryInfo->limit.offset = 0; - pQueryInfo->slimit.limit = -1; - pQueryInfo->slimit.offset = 0; + pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); + pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); + pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX; + pQueryInfo->resColumnId = TSDB_RES_COL_ID; + pQueryInfo->limit.limit = -1; + pQueryInfo->limit.offset = 0; + + pQueryInfo->slimit.limit = -1; + pQueryInfo->slimit.offset = 0; + pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES); } -int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) { +int32_t tscAddQueryInfo(SSqlCmd* pCmd) { assert(pCmd != NULL); // todo refactor: remove this structure @@ -1943,11 +2314,14 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) { tfree(pQueryInfo->fillVal); tfree(pQueryInfo->buf); + + taosArrayDestroy(pQueryInfo->pUpstream); + pQueryInfo->pUpstream = NULL; } void tscClearSubqueryInfo(SSqlCmd* pCmd) { for (int32_t i = 0; i < pCmd->numOfClause; ++i) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, i); freeQueryInfoImpl(pQueryInfo); } } @@ -2020,7 +2394,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) { for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); - if (removeMeta) { + if (removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, name); @@ -2069,7 +2443,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } if (pTagCols != NULL) { - tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1); + tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, pTableMetaInfo->pTableMeta->id.uid); } pTableMetaInfo->pVgroupTables = 
tscVgroupTableInfoDup(pVgroupTables); @@ -2109,13 +2483,13 @@ void registerSqlObj(SSqlObj* pSql) { int32_t num = atomic_add_fetch_32(&pSql->pTscObj->numOfObj, 1); int32_t total = atomic_add_fetch_32(&tscNumOfObj, 1); - tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total); + tscDebug("0x%"PRIx64" new SqlObj from %p, total in tscObj:%d, total:%d", pSql->self, pSql->pTscObj, num, total); } SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) { SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { - tscError("%p new subquery failed, tableIndex:%d", pSql, 0); + tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, 0); return NULL; } @@ -2129,12 +2503,12 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData); if (code != TSDB_CODE_SUCCESS) { - tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0); + tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0); free(pNew); return NULL; } - if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) { + if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) { #ifdef __APPLE__ // to satisfy later tsem_destroy in taos_free_result tsem_init(&pNew->rspSem, 0, 0); @@ -2149,7 +2523,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in pNew->sqlstr = NULL; pNew->maxRetry = TSDB_MAX_REPLICA; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoS(pCmd, 0); assert(pSql->cmd.clauseIndex == 0); STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); @@ -2167,13 +2541,12 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui } // set the field info in pNewQueryInfo object according to sqlExpr information - size_t numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); - for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pNewQueryInfo, i); + for (int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pNewQueryInfo, i); - TAOS_FIELD f = tscCreateField((int8_t) pExpr->resType, pExpr->aliasName, pExpr->resBytes); + TAOS_FIELD f = tscCreateField((int8_t) pExpr->base.resType, pExpr->base.aliasName, pExpr->base.resBytes); SInternalField* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); - pInfo1->pSqlExpr = pExpr; + pInfo1->pExpr = pExpr; } // update the pSqlExpr pointer in SInternalField according the field name @@ -2182,12 +2555,12 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); bool matched = false; - for (int32_t k1 = 0; k1 < numOfExprs; ++k1) { - SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); + for (int32_t k1 = 0; k1 < numOfOutput; ++k1) { + SExprInfo* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); - if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name + if (strcmp(field->name, pExpr1->base.aliasName) == 0) { // establish link according to the result field name SInternalField* pInfo = tscFieldInfoGetInternalField(&pNewQueryInfo->fieldsInfo, f); - pInfo->pSqlExpr = pExpr1; + pInfo->pExpr = pExpr1; matched = true; break; @@ -2206,12 +2579,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t 
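(Aside, not part of the patch.) The hunks above switch every expression access from the old flat SSqlExpr to the new SExprInfo wrapper, whose wire-level fields live in the embedded `base` member (see the tname.h hunk further down). A minimal sketch of that access pattern follows; it assumes the SExprInfo/SSqlExpr definitions this change set adds to tname.h, and the helper name is invented purely for illustration:

#include <stdio.h>
#include "tname.h"  /* SSqlExpr / SExprInfo as introduced by this patch (assumption: repo include path) */

/* Illustration only: result metadata of an expression is now read through pExpr->base.* */
static void dumpExprResultMeta(const SExprInfo *pExpr) {
  printf("%s: resType=%d resBytes=%d\n",
         pExpr->base.aliasName, pExpr->base.resType, pExpr->base.resBytes);
}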
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { - tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex); + tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, tableIndex); terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; return NULL; } - - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, tableIndex); + + SQueryInfo* pQueryInfo = tscGetActiveQueryInfo(pCmd); + STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[tableIndex]; pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; @@ -2224,7 +2598,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->payload = NULL; pnCmd->allocSize = 0; - pnCmd->pQueryInfo = NULL; + pnCmd->pQueryInfo = NULL; pnCmd->numOfClause = 0; pnCmd->clauseIndex = 0; pnCmd->pDataBlocks = NULL; @@ -2236,15 +2610,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->tagData.data = NULL; pnCmd->tagData.dataLen = 0; - if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { + if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pnCmd, 0); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pnCmd, 0); pNewQueryInfo->command = pQueryInfo->command; + pnCmd->active = pNewQueryInfo; + memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval)); pNewQueryInfo->type = pQueryInfo->type; pNewQueryInfo->window = pQueryInfo->window; @@ -2295,22 +2670,22 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t } if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { - tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex); + tscError("0x%"PRIx64" new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql->self, tableIndex, pTableMetaInfo->vgroupIndex); terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - - tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, (int16_t)tableIndex); + + uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; + tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, uid); // set the correct query type if (pPrevSql != NULL) { - SQueryInfo* pPrevQueryInfo = tscGetQueryInfoDetail(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex); + SQueryInfo* pPrevQueryInfo = tscGetQueryInfo(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex); pNewQueryInfo->type = pPrevQueryInfo->type; } else { TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY);// it must be the subquery } - uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; if (tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true) != 0) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; @@ -2346,7 +2721,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t // this case cannot be happened if (pFinalInfo->pTableMeta == NULL) { - tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name)); + tscError("0x%"PRIx64" new subquery failed since no tableMeta, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name)); if (pPrevSql != NULL) { // pass the previous error to client assert(pPrevSql->res.code != TSDB_CODE_SUCCESS); @@ -2364,22 +2739,22 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t 
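(Aside, not part of the patch.) The hunk below also converts the trace output in createSubqueryObj to the new logging convention: a statement is identified by its 64-bit pSql->self id printed with the PRIx64 format instead of the raw SSqlObj pointer. A small self-contained sketch of that format usage, with an invented function name:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Illustration only: mirrors the "0x%"PRIx64" ..." format used by tscDebug/tscError in this patch. */
static void logSubqueryCreated(uint64_t parentId, uint64_t subId) {
  printf("0x%" PRIx64 " new subquery:0x%" PRIx64 "\n", parentId, subId);
}

int main(void) {
  logSubqueryCreated(0x1a2bULL, 0x1a2cULL);  /* arbitrary ids, for demonstration only */
  return 0;
}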
assert(pFinalInfo->vgroupList != NULL); } + registerSqlObj(pNew); + if (cmd == TSDB_SQL_SELECT) { size_t size = taosArrayGetSize(pNewQueryInfo->colList); - tscDebug( - "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu "," + tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu "," "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, - pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), + pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey, pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit); - tscPrintSelectClause(pNew, 0); + tscPrintSelNodeList(pNew, 0); } else { - tscDebug("%p new sub insertion: %p, vnodeIdx:%d", pSql, pNew, pTableMetaInfo->vgroupIndex); + tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex); } - registerSqlObj(pNew); return pNew; _error: @@ -2387,7 +2762,54 @@ _error: return NULL; } +void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { + uint16_t type = pQueryInfo->type; + if (QUERY_IS_JOIN_QUERY(type) && !TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) { + tscHandleMasterJoinQuery(pSql); + } else if (tscMultiRoundQuery(pQueryInfo, 0) && pQueryInfo->round == 0) { + tscHandleFirstRoundStableQuery(pSql); // todo lock? + } else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query + tscLockByThread(&pSql->squeryLock); + tscHandleMasterSTableQuery(pSql); + tscUnlockByThread(&pSql->squeryLock); + } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { + tscHandleMultivnodeInsert(pSql); + } else if (pSql->cmd.command > TSDB_SQL_LOCAL) { + tscProcessLocalCmd(pSql); + } else { // send request to server directly + tscBuildAndSendRequest(pSql, pQueryInfo); + } +} + +// do execute the query according to the query execution plan +void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { + if (pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { + (*pSql->fp)(pSql->param, pSql, 0); + return; + } + + if (pSql->cmd.command == TSDB_SQL_SELECT) { + tscAddIntoSqlList(pSql); + } + + if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + SQueryInfo* pq = taosArrayGetP(pQueryInfo->pUpstream, 0); + + pSql->cmd.active = pq; + pSql->cmd.command = TSDB_SQL_SELECT; + + executeQuery(pSql, pq); + + // merge nest query result and generate final results + return; + } + + pSql->cmd.active = pQueryInfo; + doExecuteQuery(pSql, pQueryInfo); +} + /** + * todo remove it * To decide if current is a two-stage super table query, join query, or insert. 
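(This legacy dispatcher is kept for now; the "todo remove it" note above appears to refer to the new executeQuery()/doExecuteQuery() entry points added earlier in this file, which are intended to replace it.)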
And invoke different * procedure accordingly * @param pSql @@ -2410,27 +2832,22 @@ void tscDoQuery(SSqlObj* pSql) { if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { tscImportDataFromFile(pSql); } else { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); uint16_t type = pQueryInfo->type; - - if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion - tscHandleMultivnodeInsert(pSql); - return; - } - + if (QUERY_IS_JOIN_QUERY(type)) { if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) { tscHandleMasterJoinQuery(pSql); } else { // for first stage sub query, iterate all vnodes to get all timestamp if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } else { // secondary stage join query. if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query tscLockByThread(&pSql->squeryLock); tscHandleMasterSTableQuery(pSql); tscUnlockByThread(&pSql->squeryLock); } else { - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } } } @@ -2445,8 +2862,9 @@ void tscDoQuery(SSqlObj* pSql) { tscUnlockByThread(&pSql->squeryLock); return; } - - tscProcessSql(pSql); + + pCmd->active = pQueryInfo; + tscBuildAndSendRequest(pSql, NULL); } } @@ -2506,7 +2924,7 @@ bool tscIsQueryWithLimit(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; for (int32_t i = 0; i < pCmd->numOfClause; ++i) { - SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i); + SQueryInfo* pqi = tscGetQueryInfoS(pCmd, i); if (pqi == NULL) { continue; } @@ -2591,7 +3009,7 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) { } assert(pRes->completed); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); // for normal table, no need to try any more if results are all retrieved from one vnode @@ -2616,7 +3034,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); /* * no result returned from the current virtual node anymore, try the next vnode if exists @@ -2627,7 +3045,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups; if (++pTableMetaInfo->vgroupIndex < totalVgroups) { - tscDebug("%p results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql, + tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next:%d. total vgroups:%d. 
current numOfRes:%" PRId64, pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal); /* @@ -2646,8 +3064,8 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { pQueryInfo->limit.offset = pRes->offset; assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - tscDebug("%p new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, - pSql, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit); + tscDebug("0x%"PRIx64" new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, + pSql->self, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit); /* * For project query with super table join, the numOfSub is equalled to the number of all subqueries. @@ -2662,9 +3080,9 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { // set the callback function pSql->fp = fp; - tscProcessSql(pSql); + tscBuildAndSendRequest(pSql, NULL); } else { - tscDebug("%p try all %d vnodes, query complete. current numOfRes:%" PRId64, pSql, totalVgroups, pRes->numOfClauseTotal); + tscDebug("0x%"PRIx64" try all %d vnodes, query complete. current numOfRes:%" PRId64, pSql->self, totalVgroups, pRes->numOfClauseTotal); } } @@ -2676,7 +3094,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { assert(pCmd->clauseIndex < pCmd->numOfClause - 1); pCmd->clauseIndex++; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); pSql->cmd.command = pQueryInfo->command; @@ -2690,11 +3108,11 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { pSql->subState.numOfSub = 0; pSql->fp = fp; - tscDebug("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause); + tscDebug("0x%"PRIx64" try data in the next subclause:%d, total subclause:%d", pSql->self, pCmd->clauseIndex, pCmd->numOfClause); if (pCmd->command > TSDB_SQL_LOCAL) { tscProcessLocalCmd(pSql); } else { - tscDoQuery(pSql); + executeQuery(pSql, pQueryInfo); } } @@ -2953,4 +3371,288 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { return p; } +static int32_t createSecondaryExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo) { + if (!tscIsSecondStageQuery(pQueryInfo)) { + return TSDB_CODE_SUCCESS; + } + + pQueryAttr->numOfExpr2 = tscNumOfFields(pQueryInfo); + pQueryAttr->pExpr2 = calloc(pQueryAttr->numOfExpr2, sizeof(SExprInfo)); + if (pQueryAttr->pExpr2 == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < pQueryAttr->numOfExpr2; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + SExprInfo* pExpr = pField->pExpr; + + SSqlExpr *pse = &pQueryAttr->pExpr2[i].base; + pse->uid = pTableMetaInfo->pTableMeta->id.uid; + pse->resColId = pExpr->base.resColId; + + if (pExpr->base.functionId != TSDB_FUNC_ARITHM) { // this should be switched to projection query + pse->numOfParams = 0; // no params for projection query + pse->functionId = TSDB_FUNC_PRJ; + pse->colInfo.colId = pExpr->base.resColId; + + for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) { + if (pQueryAttr->pExpr1[j].base.resColId == pse->colInfo.colId) { + pse->colInfo.colIndex = j; + } + } + + pse->colInfo.flag = TSDB_COL_NORMAL; + pse->resType = pExpr->base.resType; + 
pse->resBytes = pExpr->base.resBytes; + + // TODO restore refactor + int32_t functionId = pExpr->base.functionId; + if (pExpr->base.functionId == TSDB_FUNC_FIRST_DST) { + functionId = TSDB_FUNC_FIRST; + } else if (pExpr->base.functionId == TSDB_FUNC_LAST_DST) { + functionId = TSDB_FUNC_LAST; + } else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) { + functionId = TSDB_FUNC_STDDEV; + } + + int32_t inter = 0; + getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, functionId, 0, &pse->resType, + &pse->resBytes, &inter, 0, false); + pse->colType = pse->resType; + pse->colBytes = pse->resBytes; + + } else { // arithmetic expression + pse->colInfo.colId = pExpr->base.colInfo.colId; + pse->colType = pExpr->base.colType; + pse->colBytes = pExpr->base.colBytes; + pse->resBytes = sizeof(double); + pse->resType = TSDB_DATA_TYPE_DOUBLE; + + pse->functionId = pExpr->base.functionId; + pse->numOfParams = pExpr->base.numOfParams; + + for (int32_t j = 0; j < pExpr->base.numOfParams; ++j) { + tVariantAssign(&pse->param[j], &pExpr->base.param[j]); + buildArithmeticExprFromMsg(&pQueryAttr->pExpr2[i], NULL); + } + } + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t createGlobalAggregateExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInfo) { + assert(tscIsTwoStageSTableQuery(pQueryInfo, 0)); + + pQueryAttr->numOfExpr3 = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + pQueryAttr->pExpr3 = calloc(pQueryAttr->numOfExpr3, sizeof(SExprInfo)); + if (pQueryAttr->pExpr3 == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < pQueryAttr->numOfExpr3; ++i) { + SExprInfo* pExpr = &pQueryAttr->pExpr1[i]; + SSqlExpr* pse = &pQueryAttr->pExpr3[i].base; + + tscSqlExprAssign(&pQueryAttr->pExpr3[i], pExpr); + pse->colInfo.colId = pExpr->base.resColId; + pse->colInfo.colIndex = i; + + pse->colType = pExpr->base.resType; + pse->colBytes = pExpr->base.resBytes; + } + + { + for (int32_t i = 0; i < pQueryAttr->numOfExpr3; ++i) { + SExprInfo* pExpr = &pQueryAttr->pExpr1[i]; + SSqlExpr* pse = &pQueryAttr->pExpr3[i].base; + + // the final result size and type in the same as query on single table. 
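// (the *_DST functions are the first-stage variants used when super-table results are merged; mapping them back to FIRST/LAST/STDDEV below lets getResultDataInfo() report that final single-table width)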
+ // so here, set the flag to be false; + int32_t inter = 0; + + int32_t functionId = pExpr->base.functionId; + if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) { + continue; + } + + if (functionId == TSDB_FUNC_FIRST_DST) { + functionId = TSDB_FUNC_FIRST; + } else if (functionId == TSDB_FUNC_LAST_DST) { + functionId = TSDB_FUNC_LAST; + } else if (functionId == TSDB_FUNC_STDDEV_DST) { + functionId = TSDB_FUNC_STDDEV; + } + + getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, functionId, 0, &pse->resType, &pse->resBytes, &inter, + 0, false); + } + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo) { + if (pTableMetaInfo->tagColList == NULL) { + return TSDB_CODE_SUCCESS; + } + + pQueryAttr->numOfTags = (int16_t)taosArrayGetSize(pTableMetaInfo->tagColList); + if (pQueryAttr->numOfTags == 0) { + return TSDB_CODE_SUCCESS; + } + STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[0]->pTableMeta; + + int32_t numOfTagColumns = tscGetNumOfTags(pTableMeta); + + pQueryAttr->tagColList = calloc(pQueryAttr->numOfTags, sizeof(SColumnInfo)); + if (pQueryAttr->tagColList == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + SSchema* pSchema = tscGetTableTagSchema(pTableMeta); + for (int32_t i = 0; i < pQueryAttr->numOfTags; ++i) { + SColumn* pCol = taosArrayGetP(pTableMetaInfo->tagColList, i); + SSchema* pColSchema = &pSchema[pCol->columnIndex]; + + if ((pCol->columnIndex >= numOfTagColumns || pCol->columnIndex < TSDB_TBNAME_COLUMN_INDEX) || + (!isValidDataType(pColSchema->type))) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + SColumnInfo* pTagCol = &pQueryAttr->tagColList[i]; + + pTagCol->colId = pColSchema->colId; + pTagCol->bytes = pColSchema->bytes; + pTagCol->type = pColSchema->type; + pTagCol->flist.numOfFilters = 0; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) { + memset(pQueryAttr, 0, sizeof(SQueryAttr)); + + int16_t numOfCols = (int16_t) taosArrayGetSize(pQueryInfo->colList); + int16_t numOfOutput = (int16_t) tscSqlExprNumOfExprs(pQueryInfo); + + pQueryAttr->topBotQuery = tscIsTopBotQuery(pQueryInfo); + pQueryAttr->hasTagResults = hasTagValOutput(pQueryInfo); + pQueryAttr->stabledev = isStabledev(pQueryInfo); + pQueryAttr->tsCompQuery = isTsCompQuery(pQueryInfo); + pQueryAttr->simpleAgg = isSimpleAggregate(pQueryInfo); + pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo); + pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type); + pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo); + pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); + pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); + pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); + pQueryAttr->distinctTag = pQueryInfo->distinctTag; + + pQueryAttr->numOfCols = numOfCols; + pQueryAttr->numOfOutput = numOfOutput; + pQueryAttr->limit = pQueryInfo->limit; + pQueryAttr->slimit = pQueryInfo->slimit; + pQueryAttr->order = pQueryInfo->order; + pQueryAttr->fillType = pQueryInfo->fillType; + pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo); + pQueryAttr->havingNum = pQueryInfo->havingFieldNum; + + if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor + pQueryAttr->window = pQueryInfo->window; + } else { + pQueryAttr->window.skey = pQueryInfo->window.ekey; + pQueryAttr->window.ekey = pQueryInfo->window.skey; + } + + memcpy(&pQueryAttr->interval, 
&pQueryInfo->interval, sizeof(pQueryAttr->interval)); + + STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0]; + + pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SSqlGroupbyExpr)); + *(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr; + + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { + pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo); + } else { + assert(pQueryInfo->groupbyExpr.columnInfo == NULL); + } + + pQueryAttr->pExpr1 = calloc(pQueryAttr->numOfOutput, sizeof(SExprInfo)); + for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); + tscSqlExprAssign(&pQueryAttr->pExpr1[i], pExpr); + + if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_ARITHM) { + for (int32_t j = 0; j < pQueryAttr->pExpr1[i].base.numOfParams; ++j) { + buildArithmeticExprFromMsg(&pQueryAttr->pExpr1[i], NULL); + } + } + } + + pQueryAttr->tableCols = calloc(numOfCols, sizeof(SColumnInfo)); + for(int32_t i = 0; i < numOfCols; ++i) { + SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i); + if (!isValidDataType(pCol->info.type) || pCol->info.type == TSDB_DATA_TYPE_NULL) { + assert(0); + } + + pQueryAttr->tableCols[i] = pCol->info; + pQueryAttr->tableCols[i].flist.filterInfo = tFilterInfoDup(pCol->info.flist.filterInfo, pQueryAttr->tableCols[i].flist.numOfFilters); + } + + // global aggregate query + if (pQueryAttr->stableQuery && (pQueryAttr->simpleAgg || pQueryAttr->interval.interval > 0) && tscIsTwoStageSTableQuery(pQueryInfo, 0)) { + createGlobalAggregateExpr(pQueryAttr, pQueryInfo); + } + + // for simple table, not for super table + int32_t code = createSecondaryExpr(pQueryAttr, pQueryInfo, pTableMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // tag column info + code = createTagColumnInfo(pQueryAttr, pQueryInfo, pTableMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pQueryAttr->fillType != TSDB_FILL_NONE) { + pQueryAttr->fillVal = calloc(pQueryAttr->numOfOutput, sizeof(int64_t)); + memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryAttr->numOfOutput * sizeof(int64_t)); + } + + pQueryAttr->srcRowSize = 0; + pQueryAttr->maxTableColumnWidth = 0; + for (int16_t i = 0; i < numOfCols; ++i) { + pQueryAttr->srcRowSize += pQueryAttr->tableCols[i].bytes; + if (pQueryAttr->maxTableColumnWidth < pQueryAttr->tableCols[i].bytes) { + pQueryAttr->maxTableColumnWidth = pQueryAttr->tableCols[i].bytes; + } + } + + pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr); + + if (pQueryAttr->numOfCols <= 0 && !tscQueryTags(pQueryInfo) && !pQueryAttr->queryBlockDist) { + tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", addr, + (uint64_t)pQueryAttr->numOfCols, numOfCols); + + return TSDB_CODE_TSC_INVALID_SQL; + } + + if (pQueryAttr->interval.interval < 0) { + tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, addr, + (int64_t)pQueryInfo->interval.interval); + return TSDB_CODE_TSC_INVALID_SQL; + } + + if (pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) { + tscError("%p illegal value of numOfGroupCols in query msg: %d", addr, pQueryInfo->groupbyExpr.numOfGroupCols); + return TSDB_CODE_TSC_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index acfbffc01e400f8b111ee92b7651bb048c112bd2..a0854ce81b96c5cb3c77614b7f92497cf4c56340 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#ifndef TDENGINE_TAST_H -#define TDENGINE_TAST_H +#ifndef TDENGINE_TEXPR_H +#define TDENGINE_TEXPR_H #ifdef __cplusplus extern "C" { @@ -62,32 +62,32 @@ typedef struct tExprNode { uint8_t nodeType; union { struct { - uint8_t optr; // filter operator - uint8_t hasPK; // 0: do not contain primary filter, 1: contain - void * info; // support filter operation on this expression only available for leaf node - + uint8_t optr; // filter operator + uint8_t hasPK; // 0: do not contain primary filter, 1: contain + void *info; // support filter operation on this expression only available for leaf node struct tExprNode *pLeft; // left child pointer struct tExprNode *pRight; // right child pointer } _node; - struct SSchema *pSchema; - tVariant * pVal; + + struct SSchema *pSchema; + tVariant *pVal; }; } tExprNode; typedef struct SExprTraverseSupp { __result_filter_fn_t nodeFilterFn; __do_filter_suppl_fn_t setupInfoFn; - void * pExtInfo; + void *pExtInfo; } SExprTraverseSupp; void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)); +void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); tExprNode* exprTreeFromBinary(const void* data, size_t size); tExprNode* exprTreeFromTableName(const char* tbnameCond); +tExprNode* exprdup(tExprNode* pTree); -void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); - -bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param); +bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param); typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType, void *output, int32_t order); @@ -99,4 +99,4 @@ void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, } #endif -#endif // TDENGINE_TAST_H +#endif // TDENGINE_TEXPR_H diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 465b298973cbccdf380150bdce46a09b56594801..f37a4d9a3667554fff60389d95e9a897ef2e4664 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -41,6 +41,35 @@ typedef struct SResPair { double avg; } SResPair; +// the structure for sql function in select clause +typedef struct SSqlExpr { + char aliasName[TSDB_COL_NAME_LEN]; // as aliasName + SColIndex colInfo; + + uint64_t uid; // refactor use the pointer + + int16_t functionId; // function id in aAgg array + + int16_t resType; // return value type + int16_t resBytes; // length of return value + int32_t interBytes; // inter result buffer size + + int16_t colType; // table column type + int16_t colBytes; // table column bytes + + int16_t numOfParams; // argument value of each function + tVariant param[3]; // parameters are not more than 3 + int32_t offset; // sub result column value of arithmetic expression. 
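  // output bookkeeping: the result column id and the column filter list carried with this expression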
+ int16_t resColId; // result column id + + SColumnFilterList flist; +} SSqlExpr; + +typedef struct SExprInfo { + SSqlExpr base; + struct tExprNode *pExpr; +} SExprInfo; + #define TSDB_DB_NAME_T 1 #define TSDB_TABLE_NAME_T 2 diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index 1008c4cf8f77ca77f59a57aea189cdebef9c9129..4334a0de263e9857385222462724d4b8c758593e 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -15,6 +15,7 @@ #include "os.h" +#include "texpr.h" #include "exception.h" #include "taosdef.h" #include "taosmsg.h" @@ -145,25 +146,25 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { *pExpr = NULL; } -bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) { +bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) { tExprNode *pLeft = pExpr->_node.pLeft; tExprNode *pRight = pExpr->_node.pRight; //non-leaf nodes, recursively traverse the expression tree in the post-root order if (pLeft->nodeType == TSQL_NODE_EXPR && pRight->nodeType == TSQL_NODE_EXPR) { if (pExpr->_node.optr == TSDB_RELATION_OR) { // or - if (exprTreeApplayFilter(pLeft, pItem, param)) { + if (exprTreeApplyFilter(pLeft, pItem, param)) { return true; } // left child does not satisfy the query condition, try right child - return exprTreeApplayFilter(pRight, pItem, param); + return exprTreeApplyFilter(pRight, pItem, param); } else { // and - if (!exprTreeApplayFilter(pLeft, pItem, param)) { + if (!exprTreeApplyFilter(pLeft, pItem, param)) { return false; } - return exprTreeApplayFilter(pRight, pItem, param); + return exprTreeApplyFilter(pRight, pItem, param); } } @@ -463,3 +464,28 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { CLEANUP_EXECUTE_TO(anchor, false); return expr; } + +tExprNode* exprdup(tExprNode* pTree) { + if (pTree == NULL) { + return NULL; + } + + tExprNode* pNode = calloc(1, sizeof(tExprNode)); + if (pTree->nodeType == TSQL_NODE_EXPR) { + tExprNode* pLeft = exprdup(pTree->_node.pLeft); + tExprNode* pRight = exprdup(pTree->_node.pRight); + + pNode->nodeType = TSQL_NODE_EXPR; + pNode->_node.pLeft = pLeft; + pNode->_node.pRight = pRight; + } else if (pTree->nodeType == TSQL_NODE_VALUE) { + pNode->pVal = calloc(1, sizeof(tVariant)); + tVariantAssign(pNode->pVal, pTree->pVal); + } else if (pTree->nodeType == TSQL_NODE_COL) { + pNode->pSchema = calloc(1, sizeof(SSchema)); + *pNode->pSchema = *pTree->pSchema; + } + + return pNode; +} + diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 69b01e6c089c0af814ebfb49724458990d5320fc..1524f15b7d5df38fe5f9e75d5dd88c4dd6553f0d 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -139,7 +139,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; int8_t tsEnableBalance = 1; int8_t tsAlternativeRole = 0; int32_t tsBalanceInterval = 300; // seconds -int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days +int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days int32_t tsMnodeEqualVnodeNum = 4; int8_t tsEnableFlowCtrl = 1; int8_t tsEnableSlaveQuery = 1; @@ -921,7 +921,7 @@ static void doInitGlobalConfig(void) { cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = -1; - cfg.maxValue = 10000000; + cfg.maxValue = 10000000000.0f; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 
787aa1e95b0b361d1fe4fc6931e7da04aa419c65..f1ddc60637323de747d077011a93b2602e4764f8 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -68,6 +68,7 @@ bool tscValidateTableNameLength(size_t len) { return len < TSDB_TABLE_NAME_LEN; } +// TODO refactor SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) { if (numOfFilters == 0) { assert(src == NULL); diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index 7009c4d5c3d8516f17dd4caf89b37e9c28f93274..c872d8731b572661a35ca91248afd167fefca0bf 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -48,8 +48,19 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) { case TSDB_DATA_TYPE_INT:{ ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true); if (ret != 0) { - pVar->nType = -1; // -1 means error type - return; + SStrToken t = {0}; + tSQLGetToken(token->z, &t.type); + if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN + pVar->nType = -1; // -1 means error type + return; + } + + // data overflow, try unsigned parse the input number + ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, false); + if (ret != 0) { + pVar->nType = -1; // -1 means error type + return; + } } break; @@ -525,6 +536,8 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result } bool code = false; + + uint64_t ui = 0; switch(type) { case TSDB_DATA_TYPE_TINYINT: code = IS_VALID_TINYINT(*result); break; @@ -535,13 +548,17 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result case TSDB_DATA_TYPE_BIGINT: code = IS_VALID_BIGINT(*result); break; case TSDB_DATA_TYPE_UTINYINT: - code = IS_VALID_UTINYINT(*result); break; + ui = *result; + code = IS_VALID_UTINYINT(ui); break; case TSDB_DATA_TYPE_USMALLINT: - code = IS_VALID_USMALLINT(*result); break; + ui = *result; + code = IS_VALID_USMALLINT(ui); break; case TSDB_DATA_TYPE_UINT: - code = IS_VALID_UINT(*result); break; + ui = *result; + code = IS_VALID_UINT(ui); break; case TSDB_DATA_TYPE_UBIGINT: - code = IS_VALID_UBIGINT(*result); break; + ui = *result; + code = IS_VALID_UBIGINT(ui); break; } return code? 
0:-1; diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs index 205269501d376a4753b3aedbfa8d512b2df31600..2c150341f62d16372a99d341a495771e4c2a3dbc 100644 --- a/src/connector/C#/TDengineDriver.cs +++ b/src/connector/C#/TDengineDriver.cs @@ -31,7 +31,11 @@ namespace TDengineDriver TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes TSDB_DATA_TYPE_BINARY = 8, // string TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes } enum TDengineInitOption @@ -53,15 +57,23 @@ namespace TDengineDriver switch ((TDengineDataType)type) { case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; + return "BOOL"; case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; + return "TINYINT"; case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; + return "SMALLINT"; case TDengineDataType.TSDB_DATA_TYPE_INT: return "INT"; case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; case TDengineDataType.TSDB_DATA_TYPE_FLOAT: return "FLOAT"; case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 83f7563dc339e5ad52bb4cb08acf1f155e8d8b50..de4b8f6bfb92d2de0810dab50c70452f7d47b7a8 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.26-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 611b1c608a10f11dfb89c5d66feb1077097a524d..a31796ffdeeb4b76835e4b04d40720dda149fddc 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.26 + 2.0.28 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 719cd4093800fdb96f32f0ed2b40ba6b2f1b4706..3400a82e73640c79f6157fafd2bbe8c7520145dd 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.26 + 2.0.28 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index 976078da95c387110a2e5e55076a42a2c320ca18..2970f6c2d37d73476ee0d8831a68128569766fb4 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -4,13 +4,23 @@ import java.sql.*; import java.util.Enumeration; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.concurrent.*; public abstract class AbstractConnection extends WrapperImpl implements Connection { protected volatile boolean isClosed; protected volatile String catalog; - protected volatile Properties clientInfoProps = new Properties(); + protected final Properties clientInfoProps = new Properties(); + + protected AbstractConnection(Properties properties) { + Set propNames = properties.stringPropertyNames(); + for (String propName : propNames) { + clientInfoProps.setProperty(propName, properties.getProperty(propName)); + } + String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING"); + clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat); + } @Override public abstract Statement createStatement() throws SQLException; @@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } - @Override public void setAutoCommit(boolean autoCommit) throws SQLException { if (isClosed()) @@ -441,9 +450,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - if (clientInfoProps == null) - clientInfoProps = new Properties(); - clientInfoProps.setProperty(name, value); + if (clientInfoProps != null) + clientInfoProps.setProperty(name, value); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java index abd348e68c3ebac0d940b4c9789a85ddd68e238e..4b5b88d93bdf14b2387708a3e24229d0fd5d74d7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -10,6 +10,7 @@ import java.util.Map; public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { private int fetchSize; + protected boolean wasNull; protected void checkAvailability(int columnIndex, int bounds) throws SQLException { if (isClosed()) @@ -28,7 +29,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public boolean wasNull() throws SQLException { - return false; + return wasNull; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 5b4615485dd21b7d5e41e513f48116edf53594d0..c8ab9fb15ae0ece504ff825dbbe16e12408aede0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection { } public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { + super(info); this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 
5f599df1300b3d6959e9a7b58c00a720d722e07b..bbd8519a0315015e6e775362f997d21f1af8930a 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -95,6 +95,11 @@ public class TSDBDriver extends AbstractDriver { */ public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; + /** + * timestamp format for JDBC-RESTful,should one of the options: string or timestamp or utc + */ + public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat"; + private TSDBDatabaseMetaData dbMetaData = null; static { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index a20ddaa836535b2249ecc1a05e74a7903b9caeeb..2576a25f0d40e15efbb958706f672a40fbd2ee2e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -203,7 +203,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); + Object value = this.rowData.get(columnIndex - 1); + if (value instanceof Timestamp) + res = ((Timestamp) value).getTime(); + else + res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } @@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, this.columnMetaDataList.size()); Timestamp res = null; - if (this.getBatchFetch()) return this.blockData.getTimestamp(columnIndex - 1); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 7cf5f0d79a89d0e6ed21d6b18136b92dc2c0b131..34470fbc4e5c06caa117737923612478d7039a90 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -17,6 +17,7 @@ package com.taosdata.jdbc; import java.math.BigDecimal; import java.sql.SQLException; import java.sql.Timestamp; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; @@ -299,7 +300,19 @@ public class TSDBResultSetRowData { } public void setTimestamp(int col, long ts) { - data.set(col, new Timestamp(ts)); + //TODO: this implementation contains logical error + // when precision is us the (long ts) is 16 digital number + // when precision is ms, the (long ts) is 13 digital number + // we need a JNI function like this: + // public void setTimestamp(int col, long epochSecond, long nanoAdjustment) + if (ts < 1_0000_0000_0000_0L) { + data.set(col, new Timestamp(ts)); + } else { + long epochSec = ts / 1000_000l; + long nanoAdjustment = ts % 1000_000l * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + data.set(col, timestamp); + } } public Timestamp getTimestamp(int col) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 1f3ed2d144a88b10c0b1a8947e70def2c3db42a9..b810f9aeb59247ef618451b542a678d6c951ad3f 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection { private final DatabaseMetaData metadata; public RestfulConnection(String host, String port, Properties props, String database, String url) { + super(props); this.host = host; this.port = Integer.parseInt(port); this.database = database; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 5c2d4c45b079974672843b232e38fcd9d010b0c4..db635f5f79f187dab98cb9fdfdc50369ca682fa9 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -5,13 +5,12 @@ import com.alibaba.fastjson.JSONObject; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import com.google.common.primitives.Shorts; -import com.taosdata.jdbc.AbstractResultSet; -import com.taosdata.jdbc.TSDBConstants; -import com.taosdata.jdbc.TSDBError; -import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.*; import java.math.BigDecimal; import java.sql.*; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Calendar; @@ -19,6 +18,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; + private final String database; private final Statement statement; // data @@ -65,7 +65,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - private Object parseColumnData(JSONArray row, int colIndex, int taosType) { + private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException { switch (taosType) { case TSDBConstants.TSDB_DATA_TYPE_BOOL: return row.getBoolean(colIndex); @@ -81,8 +81,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return row.getFloat(colIndex); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return row.getDouble(colIndex); - case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: - return new Timestamp(row.getDate(colIndex).getTime()); + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + if (row.get(colIndex) == null) + return null; + String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) { + Long value = row.getLong(colIndex); + //TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9 + if (value < 1_0000_0000_0000_0L) + return new Timestamp(value); + long epochSec = value / 1000_000l; + long nanoAdjustment = value % 1000_000l * 1000l; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if ("UTC".equalsIgnoreCase(timestampFormat)) { + String value = row.getString(colIndex); + long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; + int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5)); + long nanoAdjustment = 0; + if (value.length() > 28) { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 + nanoAdjustment = fractionalSec * 1000l; + } else { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 + nanoAdjustment = fractionalSec * 1000_000l; + } + ZoneOffset zoneOffset = 
ZoneOffset.of(value.substring(value.length() - 5)); + Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant(); + return Timestamp.from(instant); + } + String value = row.getString(colIndex); + if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS + return row.getTimestamp(colIndex); + // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS + long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; + long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + return timestamp; + } case TSDBConstants.TSDB_DATA_TYPE_BINARY: return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: @@ -126,12 +162,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - @Override - public boolean wasNull() throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - return resultSet.isEmpty(); - } +// @Override +// public boolean wasNull() throws SQLException { +// if (isClosed()) +// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); +// return resultSet.isEmpty(); +// } @Override public String getString(int columnIndex) throws SQLException { @@ -150,8 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return false; + } + wasNull = false; if (value instanceof Boolean) return (boolean) value; return Boolean.valueOf(value.toString()); @@ -162,8 +201,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Byte.MIN_VALUE) return 0; @@ -183,8 +225,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Short.MIN_VALUE) return 0; @@ -198,8 +243,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; long valueAsLong = Long.parseLong(value.toString()); if (valueAsLong == Integer.MIN_VALUE) return 0; @@ -213,9 +261,14 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; - + } + wasNull = false; + if (value instanceof Timestamp) { + return ((Timestamp) value).getTime(); + } long valueAsLong = 0; try { valueAsLong = Long.parseLong(value.toString()); @@ -232,8 +285,11 @@ public class RestfulResultSet extends 
AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; if (value instanceof Float || value instanceof Double) return (float) value; return Float.parseFloat(value.toString()); @@ -244,8 +300,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, resultSet.get(pos).size()); Object value = resultSet.get(pos).get(columnIndex - 1); - if (value == null) + if (value == null) { + wasNull = true; return 0; + } + wasNull = false; if (value instanceof Double || value instanceof Float) return (double) value; return Double.parseDouble(value.toString()); @@ -307,6 +366,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return null; if (value instanceof Timestamp) return (Timestamp) value; +// if (value instanceof Long) { +// if (1_0000_0000_0000_0L > (long) value) +// return Timestamp.from(Instant.ofEpochMilli((long) value)); +// long epochSec = (long) value / 1000_000L; +// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000); +// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); +// } return Timestamp.valueOf(value.toString()); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 9071c046726a0ef7df3f712ac3ddbb10d3afafba..e9cc3a009fc339dc05ae1a9ad70e682eff22a616 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.AbstractStatement; +import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; import com.taosdata.jdbc.utils.HttpClientPoolUtil; @@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - return executeOneQuery(url, sql); + return executeOneQuery(sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); - return executeOneQuery(url, sql); + return executeOneQuery(sql); } @Override @@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement { return executeOneUpdate(url, sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); return executeOneUpdate(url, sql); } @@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement { //如果执行了use操作应该将当前Statement的catalog设置为新的database boolean result = true; - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) { + url = "http://" + 
conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + } + if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) { + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + } + if (SqlSyntaxValidator.isUseSql(sql)) { HttpClientPoolUtil.execute(url, sql); this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - executeOneQuery(url, sql); + executeOneQuery(sql); } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { executeOneUpdate(url, sql); result = false; @@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement { return result; } - private ResultSet executeOneQuery(String url, String sql) throws SQLException { + private ResultSet executeOneQuery(String sql) throws SQLException { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); // row data + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + if ("UTC".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + String result = HttpClientPoolUtil.execute(url, sql); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java new file mode 100644 index 0000000000000000000000000000000000000000..04a11a2beb00832a20d9d2d887a8ce5a68e5cf54 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java @@ -0,0 +1,12 @@ +package com.taosdata.jdbc.utils; + +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; + +public class UtcTimestampUtil { + public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder() + .appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+") +// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true) + .toFormatter(); + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java new file mode 100644 index 0000000000000000000000000000000000000000..782125144c4fbe8dcc4bdfd4769e95e5119ea32f --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class NullValueInResultSetForJdbcJniTest { + + private static final String host = "127.0.0.1"; + Connection conn; + + @Test + public void test() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + Object value = rs.getObject(i); + System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); + } + System.out.println(); + } + + } catch 
(SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() throws SQLException { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)"); + stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)"); + stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)"); + stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)"); + stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)"); + stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)"); + stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)"); + stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')"); + stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java new file mode 100644 index 0000000000000000000000000000000000000000..c6fba81eb24b9b8c08fd553ca57b1e1d68bb81e0 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java @@ -0,0 +1,91 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.TimestampUtil; +import org.junit.*; + +import java.sql.*; +import java.util.Properties; + +public class TD3841Test { + private static final String host = "127.0.0.1"; + private static Properties properties; + private static Connection conn_restful; + private static Connection conn_jni; + + @Test + public void testRestful() throws SQLException { + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn_restful = DriverManager.getConnection(url, properties); + + try (Statement stmt = conn_restful.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")"); + ResultSet rs = stmt.executeQuery("select * from weather"); + rs.next(); + + Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime())); + Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull()); + Assert.assertEquals(null, 
rs.getBytes(9)); + Assert.assertEquals(null, rs.getString(10)); + + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testJNI() throws SQLException { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn_jni = DriverManager.getConnection(url, properties); + + try (Statement stmt = conn_jni.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")"); + ResultSet rs = stmt.executeQuery("select * from weather"); + rs.next(); + + Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime())); + Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull()); + Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull()); + Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull()); + Assert.assertEquals(null, rs.getBytes(9)); + Assert.assertEquals(null, rs.getString(10)); + + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + + @AfterClass + public static void afterClass() throws SQLException { + if (conn_restful != null) + conn_restful.close(); + if (conn_jni != null) + conn_jni.close(); + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f9b111bb12f189a7a42c7944237aa01ebb008d25 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java @@ -0,0 +1,89 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInJniTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn; + + @Test + public void testCase1() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + 
e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + System.out.println(timestamp); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java new file mode 100644 index 0000000000000000000000000000000000000000..ed4f979ef33354595bfeb707678e3b9797aba686 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java @@ -0,0 +1,168 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn1; + private static Connection conn2; + private static Connection conn3; + + @Test + public void testCase1() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + 
e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase3() { + try (Statement stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + Timestamp rsTimestamp = rs.getTimestamp(1); + long ts = rsTimestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase4() { + try (Statement stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase5() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase6() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); +// properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP"); + + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn1 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=timestamp"; + conn2 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=utc"; + conn3 = DriverManager.getConnection(url, properties); + + Statement stmt = conn1.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + 
ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn1 != null) + conn1.close(); + if (conn2 != null) + conn2.close(); + if (conn3 != null) + conn3.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py index d1fca047c67861706e6e6afc4bc05cd888bff755..ff2d90fcb3dff5965572452672a7491dd4aa44d1 100644 --- a/src/connector/python/linux/python2/setup.py +++ b/src/connector/python/linux/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.7", + version="2.0.8", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py index 8f9aab82da64d24645311d1263f9abb006c737eb..4c0456b5031df18ec974155518b67e5f2f1bee0c 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -42,7 +42,7 @@ class TDengineCursor(object): def __iter__(self): return self - def __next__(self): + def next(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetch iterator") diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index d4d202267c37365adc2647df3874185bc669b4a2..5d5d5f339eec62db09f4c93acad0cea096c0962d 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -310,8 +310,23 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch } SCqContext *pContext = handle; int64_t rid = 0; + + pthread_mutex_lock(&pContext->mutex); + + SCqObj *pObj = pContext->pHead; + while (pObj) { + if (pObj->uid == uid) { + rid = pObj->rid; + pthread_mutex_unlock(&pContext->mutex); + return (void *)rid; + } + + pObj = pObj->next; + } - SCqObj *pObj = calloc(sizeof(SCqObj), 1); + pthread_mutex_unlock(&pContext->mutex); + + pObj = calloc(sizeof(SCqObj), 1); if (pObj == NULL) return NULL; pObj->uid = uid; @@ -386,12 +401,15 @@ static void doCreateStream(void *param, TAOS_RES *result, int32_t code) { if (pObj == NULL) { return; } - + SCqContext* pContext = pObj->pContext; - SSqlObj* pSql = (SSqlObj*)result; - if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { - taos_close(pSql->pTscObj); + SSqlObj* pSql = (SSqlObj*)result; + if (code == TSDB_CODE_SUCCESS) { + if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { + taos_close(pSql->pTscObj); + } } + pthread_mutex_lock(&pContext->mutex); cqCreateStream(pContext, pObj); pthread_mutex_unlock(&pContext->mutex); @@ -427,10 +445,11 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, (void *)pObj->rid, pContext->tmrCtrl); return; } + pObj->tmrId = 0; if (pObj->pStream == NULL) { - pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, 
cqProcessStreamRes, 0, (void *)pObj->rid, NULL); + pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL); // TODO the pObj->pStream may be released if error happens if (pObj->pStream) { diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 024bc198df80bd42f547cd89543c028403acccd3..e9170860a6db3e139c45c85fea6c9a19a0e44d63 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -243,8 +243,9 @@ do { \ #define TSDB_MAX_REPLICA 5 #define TSDB_TBNAME_COLUMN_INDEX (-1) -#define TSDB_BLOCK_DIST_COLUMN_INDEX (-2) -#define TSDB_UD_COLUMN_INDEX (-100) +#define TSDB_BLOCK_DIST_COLUMN_INDEX (-2) +#define TSDB_UD_COLUMN_INDEX (-1000) +#define TSDB_RES_COL_ID (-5000) #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta @@ -388,9 +389,10 @@ typedef enum { typedef enum { TSDB_SUPER_TABLE = 0, // super table TSDB_CHILD_TABLE = 1, // table created from super table - TSDB_NORMAL_TABLE = 2, // ordinary table - TSDB_STREAM_TABLE = 3, // table created from stream computing - TSDB_TABLE_MAX = 4 + TSDB_NORMAL_TABLE = 2, // ordinary table + TSDB_STREAM_TABLE = 3, // table created from stream computing + TSDB_TEMP_TABLE = 4, // temp table created by nest query + TSDB_TABLE_MAX = 5 } ETableType; typedef enum { diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index d7ac7dd277c0487b97096821753c904da137e534..3b7022fb88d34e0e71a3f70ace85d769e338b11e 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -399,7 +399,6 @@ typedef struct SColIndex { char name[TSDB_COL_NAME_LEN]; // TODO remove it } SColIndex; - typedef struct SColumnFilterInfo { int16_t lowerRelOptr; int16_t upperRelOptr; @@ -421,42 +420,13 @@ typedef struct SColumnFilterInfo { }; } SColumnFilterInfo; -/* sql function msg, to describe the message to vnode about sql function - * operations in select clause */ -typedef struct SSqlFuncMsg { - int16_t functionId; - int16_t numOfParams; - - int16_t resColId; // result column id, id of the current output column - int16_t colType; - int16_t colBytes; - - SColIndex colInfo; - struct ArgElem { - int16_t argType; - int16_t argBytes; - union { - double d; - int64_t i64; - char * pz; - } argValue; - } arg[3]; - - int32_t filterNum; - SColumnFilterInfo filterInfo[]; -} SSqlFuncMsg; - - -typedef struct SExprInfo { - SColumnFilterInfo * pFilter; - struct tExprNode* pExpr; - int16_t bytes; - int16_t type; - int32_t interBytes; - int64_t uid; - SSqlFuncMsg base; -} SExprInfo; - +typedef struct SColumnFilterList { + int16_t numOfFilters; + union{ + int64_t placeholder; + SColumnFilterInfo *filterInfo; + }; +} SColumnFilterList; /* * for client side struct, we only need the column id, type, bytes are not necessary * But for data in vnode side, we need all the following information. 
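
Note between the taosmsg.h hunks: the SColumnFilterList introduced just above gathers the per-column filter fields (numOfFilters plus the filterInfo/placeholder union) that the next hunk removes from SColumnInfo and replaces with a single flist member. A minimal C sketch of what a call site looks like after the change; the allocation and cleanup around it are illustrative assumptions, not taken from this patch:

    #include <stdlib.h>
    #include "taosmsg.h"   /* assumed include providing SColumnInfo / SColumnFilterList */

    static void demoFilterAccess(void) {
        SColumnInfo col = {0};
        /* formerly col.numOfFilters / col.filters; now grouped under col.flist */
        col.flist.numOfFilters = 1;
        col.flist.filterInfo   = calloc(1, sizeof(SColumnFilterInfo));
        if (col.flist.filterInfo != NULL) {
            col.flist.filterInfo[0].lowerRelOptr = 0;   /* placeholder filter value */
            free(col.flist.filterInfo);
        }
    }
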
@@ -465,11 +435,7 @@ typedef struct SColumnInfo { int16_t colId; int16_t type; int16_t bytes; - int16_t numOfFilters; - union{ - int64_t placeholder; - SColumnFilterInfo *filters; - }; + SColumnFilterList flist; } SColumnInfo; typedef struct STableIdInfo { @@ -483,10 +449,29 @@ typedef struct STimeWindow { TSKEY ekey; } STimeWindow; +typedef struct { + int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed + int32_t tsLen; // total length of ts comp block + int32_t tsNumOfBlocks; // ts comp block numbers + int32_t tsOrder; // ts comp block order +} STsBufInfo; + typedef struct { SMsgHead head; char version[TSDB_VERSION_LEN]; + bool stableQuery; // super table query or not + bool topBotQuery; // TODO used bitwise flag + bool groupbyColumn; // denote if this is a groupby normal column query + bool hasTagResults; // if there are tag values in final result or not + bool timeWindowInterpo;// if the time window start/end required interpolation + bool queryBlockDist; // if query data block distribution + bool stabledev; // super table stddev query + bool tsCompQuery; // is tscomp query + bool simpleAgg; + bool pointInterpQuery; // point interpolation query + bool needReverseScan; // need reverse scan + STimeWindow window; int32_t numOfTables; int16_t order; @@ -509,14 +494,13 @@ typedef struct { int16_t fillType; // interpolate type uint64_t fillVal; // default value array list int32_t secondStageOutput; - int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed - int32_t tsLen; // total length of ts comp block - int32_t tsNumOfBlocks; // ts comp block numbers - int32_t tsOrder; // ts comp block order + STsBufInfo tsBuf; // tsBuf info int32_t numOfTags; // number of tags columns involved int32_t sqlstrLen; // sql query string int32_t prevResultLen; // previous result length - SColumnInfo colList[]; + int32_t numOfOperator; + int32_t tableScanOperator;// table scan operator. 
-1 means no scan operator + SColumnInfo tableCols[]; } SQueryTableMsg; typedef struct { @@ -827,7 +811,7 @@ typedef struct { uint32_t queryId; int64_t useconds; int64_t stime; - uint64_t qHandle; + uint64_t qId; } SQueryDesc; typedef struct { diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 85ee9f0443bcab328cd086f32723f0599bd4c353..1ba5131f6db82cf7f0fdd512b7dbf94bd68914dc 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -221,7 +221,7 @@ typedef struct { typedef struct { uint32_t numOfTables; - SArray * pGroupList; + SArray *pGroupList; SHashObj *map; // speedup acquire the tableQueryInfo by table uid } STableGroupInfo; diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 5f47d9896fa50042ac6b090f4ba53103bb620648..e9f95660f7d6defb6a8dd63b7431bd0ab913b4ab 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -205,11 +205,6 @@ #define TK_VALUES 186 - - - - - #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 3f6b3da9bf4b1f80f58a06613e8571ca16892c46..37050c416c39d0624e620bf2e4a5fc7b7dfdc071 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -37,7 +37,7 @@ static struct argp_option options[] = { {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, - {"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."}, + {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, {"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."}, {"dump-config", 'C', 0, 0, "Dump configuration."}, {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 11c1ac34d8bc6058b33699c4d65d9d1224b88d31..87d11a3516a65e83201bf4ebe51f07e5394d5cdf 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -199,15 +199,19 @@ void updateBuffer(Command *cmd) { } int isReadyGo(Command *cmd) { - char total[MAX_COMMAND_SIZE]; + char *total = malloc(MAX_COMMAND_SIZE); memset(total, 0, MAX_COMMAND_SIZE); sprintf(total, "%s%s", cmd->buffer, cmd->command); char *reg_str = "(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^" "\\s*clear\\s*$)"; - if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) return 1; + if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) { + free(total); + return 1; + } + free(total); return 0; } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 30b78a9b210300523310a1456f42bb14104b528a..42992b782f1a92c585a86902ad1ebfe797863192 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -81,7 +81,7 @@ enum QUERY_MODE { #define MAX_DB_NAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE 16000 +#define MAX_DATA_SIZE (16*1024) #define MAX_NUM_DATATYPE 10 #define OPT_ABORT 1 /* –abort */ #define STRING_LEN 60000 @@ -97,7 +97,7 @@ enum QUERY_MODE { #define MAX_TAG_COUNT 128 #define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH 256 +#define MAX_QUERY_SQL_LENGTH 1024 #define MAX_DATABASE_COUNT 256 #define INPUT_BUF_LEN 256 @@ -210,13 +210,13 @@ typedef struct SArguments_S { int len_of_binary; int num_of_CPR; int num_of_threads; - int insert_interval; - int 
query_times; - int interlace_rows; - int num_of_RPR; - int max_sql_len; - int num_of_tables; - int num_of_DPT; + int64_t insert_interval; + int64_t query_times; + int64_t interlace_rows; + int64_t num_of_RPR; // num_of_records_per_req + int64_t max_sql_len; + int64_t num_of_tables; + int64_t num_of_DPT; int abort; int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision @@ -235,25 +235,25 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; - int childTblCount; + int64_t childTblCount; bool childTblExists; // 0: no, 1: yes - int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful - int childTblLimit; - int childTblOffset; + int64_t childTblLimit; + int64_t childTblOffset; - int multiThreadWriteOneTbl; // 0: no, 1: yes - int interlaceRows; // +// int multiThreadWriteOneTbl; // 0: no, 1: yes + int64_t interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int maxSqlLen; // + int64_t maxSqlLen; // - int insertInterval; // insert interval, will override global insert interval - int64_t insertRows; // 0: no limit - int timeStampStep; + int64_t insertInterval; // insert interval, will override global insert interval + int64_t insertRows; + int64_t timeStampStep; char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; @@ -266,8 +266,8 @@ typedef struct SSuperTable_S { char* childTblName; char* colsOfCreateChildTable; - int lenOfOneRow; - int lenOfTagOfOneRow; + int64_t lenOfOneRow; + int64_t lenOfTagOfOneRow; char* sampleDataBuf; //int sampleRowCount; @@ -279,8 +279,8 @@ typedef struct SSuperTable_S { int tagUsePos; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + int64_t totalInsertRows; + int64_t totalAffectedRows; } SSuperTable; typedef struct { @@ -327,7 +327,7 @@ typedef struct SDataBase_S { char dbName[MAX_DB_NAME_SIZE]; bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; - int superTblCount; + int64_t superTblCount; SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; } SDataBase; @@ -349,44 +349,44 @@ typedef struct SDbs_S { SDataBase db[MAX_DB_COUNT]; // statistics - int64_t totalInsertRows; - int64_t totalAffectedRows; + int64_t totalInsertRows; + int64_t totalAffectedRows; } SDbs; typedef struct SpecifiedQueryInfo_S { - int rate; // 0: unlimit > 0 loop/s - int concurrent; - int sqlCount; + int64_t queryInterval; // 0: unlimit > 0 loop/s + int64_t concurrent; + int64_t sqlCount; int mode; // 0: sync, 1: async - int subscribeInterval; // ms - int queryTimes; + int64_t subscribeInterval; // ms + int64_t queryTimes; int subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - int totalQueried; + int64_t totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; - int rate; // 0: unlimit > 0 loop/s + int64_t queryInterval; // 0: unlimit > 0 loop/s int threadCnt; int mode; // 0: sync, 1: async - int subscribeInterval; // ms + int64_t 
subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; - int queryTimes; - int childTblCount; + int64_t queryTimes; + int64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - int sqlCount; + int64_t sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; - int totalQueried; + int64_t totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { @@ -400,7 +400,7 @@ typedef struct SQueryMetaInfo_S { SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; - int totalQueried; + int64_t totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { @@ -410,11 +410,11 @@ typedef struct SThreadInfo_S { uint32_t time_precision; char fp[4096]; char tb_prefix[MAX_TB_NAME_SIZE]; - int start_table_from; - int end_table_to; - int ntables; - int data_of_rate; - uint64_t start_time; + int64_t start_table_from; + int64_t end_table_to; + int64_t ntables; + int64_t data_of_rate; + int64_t start_time; char* cols; bool use_metric; SSuperTable* superTblInfo; @@ -427,7 +427,7 @@ typedef struct SThreadInfo_S { int64_t lastTs; // sample data - int samplePos; + int64_t samplePos; // statistics int64_t totalInsertRows; int64_t totalAffectedRows; @@ -440,7 +440,7 @@ typedef struct SThreadInfo_S { int64_t minDelay; // query - int querySeq; // sequence number of sql command + int64_t querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -554,25 +554,19 @@ SArguments g_args = { "./output.txt", // output_file 0, // mode : sync or async { - "TINYINT", // datatype - "SMALLINT", - "INT", - "BIGINT", - "FLOAT", - "DOUBLE", - "BINARY", - "NCHAR", - "BOOL", - "TIMESTAMP" + "INT", // datatype + "INT", // datatype + "INT", // datatype + "INT", // datatype }, 16, // len_of_binary - 10, // num_of_CPR + 4, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval 1, // query_times 0, // interlace_rows; - 100, // num_of_RPR - TSDB_PAYLOAD_SIZE, // max_sql_len + 30000, // num_of_RPR + 1024000, // max_sql_len 10000, // num_of_tables 10000, // num_of_DPT 0, // abort @@ -667,7 +661,7 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-o", indent, "Direct output to the named file. Default is './output.txt'."); printf("%s%s%s%s\n", indent, "-q", indent, - "Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); + "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); printf("%s%s%s%s\n", indent, "-b", indent, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP."); printf("%s%s%s%s\n", indent, "-w", indent, @@ -679,7 +673,7 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-i", indent, "The sleep time (ms) between insertion. Default is 0."); printf("%s%s%s%s\n", indent, "-r", indent, - "The number of records per request. Default is 100."); + "The number of records per request. Default is 30000."); printf("%s%s%s%s\n", indent, "-t", indent, "The number of tables. Default is 10000."); printf("%s%s%s%s\n", indent, "-n", indent, @@ -692,13 +686,30 @@ static void printHelp() { "Out of order data's range, ms, default is 1000."); printf("%s%s%s%s\n", indent, "-g", indent, "Print debug info."); - printf("%s%s%s%s\n", indent, "-V, --version", indent, + printf("%s%s%s\n", indent, "-V, --version\t", "Print version info."); + printf("%s%s%s%s\n", indent, "--help\t", indent, + "Print command line arguments list info."); /* printf("%s%s%s%s\n", indent, "-D", indent, "if elete database if exists. 
0: no, 1: yes, default is 1"); */ } +static bool isStringNumber(char *input) +{ + int len = strlen(input); + if (0 == len) { + return false; + } + + for (int i = 0; i < len; i++) { + if (!isdigit(input[i])) + return false; + } + + return true; +} + static void parse_args(int argc, char *argv[], SArguments *arguments) { char **sptr; @@ -706,39 +717,134 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (strcmp(argv[i], "-f") == 0) { arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-c need a valid path following!\n"); + exit(EXIT_FAILURE); + } + tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); } else if (strcmp(argv[i], "-h") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-h need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->host = argv[++i]; } else if (strcmp(argv[i], "-p") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-p need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->port = atoi(argv[++i]); } else if (strcmp(argv[i], "-u") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-u need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->user = argv[++i]; } else if (strcmp(argv[i], "-P") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-P need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->password = argv[++i]; } else if (strcmp(argv[i], "-o") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-o need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->output_file = argv[++i]; } else if (strcmp(argv[i], "-s") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-s need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. 
Default is SYNC.\n"); + exit(EXIT_FAILURE); + } arguments->query_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-T need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-i need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->insert_interval = atoi(argv[++i]); } else if (strcmp(argv[i], "-qt") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-qt need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-B need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->interlace_rows = atoi(argv[++i]); } else if (strcmp(argv[i], "-r") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-r need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->num_of_RPR = atoi(argv[++i]); } else if (strcmp(argv[i], "-t") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-t need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->num_of_tables = atoi(argv[++i]); } else if (strcmp(argv[i], "-n") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-n need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->num_of_DPT = atoi(argv[++i]); } else if (strcmp(argv[i], "-d") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-d need a valid string following!\n"); + exit(EXIT_FAILURE); + } arguments->database = argv[++i]; } else if (strcmp(argv[i], "-l") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-l need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->num_of_CPR = atoi(argv[++i]); } else if (strcmp(argv[i], "-b") == 0) { sptr = arguments->datatype; @@ -755,7 +861,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { printHelp(); - ERROR_EXIT( "Invalid data_type!\n"); + errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } sptr[0] = argv[i]; @@ -777,7 +883,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "NCHAR")) { printHelp(); free(dupstr); - ERROR_EXIT("Invalid data_type!\n"); + errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } sptr[index++] = token; @@ -788,8 +894,20 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { sptr[index] = NULL; } } else if (strcmp(argv[i], "-w") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-w need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->len_of_binary = atoi(argv[++i]); } else if (strcmp(argv[i], "-m") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-m need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->tb_prefix = argv[++i]; } else if (strcmp(argv[i], "-N") == 0) { arguments->use_metric = false; @@ -804,22 +922,42 @@ static void 
parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-pp") == 0) { arguments->performance_print = true; } else if (strcmp(argv[i], "-O") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-O need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->disorderRatio = atoi(argv[++i]); - if (arguments->disorderRatio > 50) + if (arguments->disorderRatio > 50) { arguments->disorderRatio = 50; + } - if (arguments->disorderRatio < 0) + if (arguments->disorderRatio < 0) { arguments->disorderRatio = 0; + } } else if (strcmp(argv[i], "-R") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-R need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->disorderRange = atoi(argv[++i]); if (arguments->disorderRange < 0) arguments->disorderRange = 1000; } else if (strcmp(argv[i], "-a") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-a need a number following!\n"); + exit(EXIT_FAILURE); + } arguments->replica = atoi(argv[++i]); if (arguments->replica > 3 || arguments->replica < 1) { arguments->replica = 1; @@ -839,7 +977,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(0); } else { printHelp(); - ERROR_EXIT("ERROR: wrong options\n"); + errorPrint("%s", "ERROR: wrong options\n"); exit(EXIT_FAILURE); } } @@ -863,13 +1001,18 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { break; printf("\n"); } - printf("# Insertion interval: %d\n", arguments->insert_interval); - printf("# Number of records per req: %d\n", arguments->num_of_RPR); - printf("# Max SQL length: %d\n", arguments->max_sql_len); + printf("# Insertion interval: %"PRId64"\n", + arguments->insert_interval); + printf("# Number of records per req: %"PRId64"\n", + arguments->num_of_RPR); + printf("# Max SQL length: %"PRId64"\n", + arguments->max_sql_len); printf("# Length of Binary: %d\n", arguments->len_of_binary); printf("# Number of Threads: %d\n", arguments->num_of_threads); - printf("# Number of Tables: %d\n", arguments->num_of_tables); - printf("# Number of Data per Table: %d\n", arguments->num_of_DPT); + printf("# Number of Tables: %"PRId64"\n", + arguments->num_of_tables); + printf("# Number of Data per Table: %"PRId64"\n", + arguments->num_of_DPT); printf("# Database name: %s\n", arguments->database); printf("# Table prefix: %s\n", arguments->tb_prefix); if (arguments->disorderRatio) { @@ -1003,17 +1146,6 @@ static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) taos_free_result(res); } -static double getCurrentTimeUs() { - struct timeval tv; - - if (gettimeofday(&tv, NULL) != 0) { - perror("Failed to get current time in ms"); - return 0.0; - } - - return tv.tv_sec + tv.tv_usec / 1E6; -} - static int32_t rand_bool(){ static int cursor; cursor++; @@ -1063,7 +1195,7 @@ static void rand_string(char *str, int size) { //--size; int n; for (n = 0; n < size - 1; n++) { - int key = rand_tinyint() % (int)(sizeof(charset) - 1); + int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); str[n] = charset[key]; } str[n] = 0; @@ -1117,9 +1249,12 @@ static int printfInsertMeta() { printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - printf("top insert interval: \033[33m%d\033[0m\n", 
g_args.insert_interval); - printf("number of records per req: \033[33m%d\033[0m\n", g_args.num_of_RPR); - printf("max sql length: \033[33m%d\033[0m\n", g_args.max_sql_len); + printf("top insert interval: \033[33m%"PRId64"\033[0m\n", + g_args.insert_interval); + printf("number of records per req: \033[33m%"PRId64"\033[0m\n", + g_args.num_of_RPR); + printf("max sql length: \033[33m%"PRId64"\033[0m\n", + g_args.max_sql_len); printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); @@ -1180,10 +1315,10 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%d\033[0m\n", + printf(" super table count: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%d\033[0m]:\n", j); + for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); @@ -1204,7 +1339,7 @@ static int printfInsertMeta() { printf(" childTblExists: \033[33m%s\033[0m\n", "error"); } - printf(" childTblCount: \033[33m%d\033[0m\n", + printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1213,26 +1348,27 @@ static int printfInsertMeta() { printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%d\033[0m\n", + printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblLimit); } if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { - printf(" childTblOffset: \033[33m%d\033[0m\n", + printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblOffset); } printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); - +/* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); }else { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } - printf(" interlaceRows: \033[33m%d\033[0m\n", + */ + printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%d\033[0m\n", + printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertInterval); } @@ -1240,9 +1376,9 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].disorderRange); printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%d\033[0m\n", + printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); - printf(" timeStampStep: \033[33m%d\033[0m\n", + printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp); @@ -1306,8 +1442,8 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - fprintf(fp, "number of records per req: %d\n", g_args.num_of_RPR); - fprintf(fp, "max sql length: %d\n", g_args.max_sql_len); + fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR); + fprintf(fp, "max sql 
length: %"PRId64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -1364,7 +1500,7 @@ static void printfInsertMetaToFile(FILE* fp) { } } - fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); + fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { fprintf(fp, " super table[%d]:\n", j); @@ -1386,7 +1522,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " childTblExists: %s\n", "error"); } - fprintf(fp, " childTblCount: %d\n", + fprintf(fp, " childTblCount: %"PRId64"\n", g_Dbs.db[i].superTbls[j].childTblCount); fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); @@ -1396,26 +1532,30 @@ static void printfInsertMetaToFile(FILE* fp) { g_Dbs.db[i].superTbls[j].insertMode); fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %d\n", + fprintf(fp, " interlace rows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %d\n", + fprintf(fp, " stable insert interval: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertInterval); } - +/* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { fprintf(fp, " multiThreadWriteOneTbl: no\n"); }else { fprintf(fp, " multiThreadWriteOneTbl: yes\n"); } - fprintf(fp, " interlaceRows: %d\n", + */ + fprintf(fp, " interlaceRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].interlaceRows); fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + fprintf(fp, " maxSqlLen: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); - fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " timeStampStep: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", + g_Dbs.db[i].superTbls[j].startTimestamp); fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); @@ -1470,21 +1610,21 @@ static void printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.rate); - printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", + printf("query interval: \033[33m%"PRId64" ms\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryInterval); + printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%d\033[0m\n", + printf(" \033[33m%"PRId64"\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.mode); - printf("interval: \033[33m%d\033[0m\n", + printf("interval: \033[33m%"PRId64"\033[0m\n", 
g_queryInfo.specifiedQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); @@ -1492,27 +1632,27 @@ static void printfQueryMeta() { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", + for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); printf("super table query info:\n"); - printf("query interval: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.rate); + printf("query interval: \033[33m%"PRId64"\033[0m\n", + g_queryInfo.superQueryInfo.queryInterval); printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", + printf("childTblCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%d\033[0m\n", + printf("stb query times:\033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.mode); - printf("interval: \033[33m%d\033[0m\n", + printf("interval: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); @@ -1520,7 +1660,7 @@ static void printfQueryMeta() { g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", @@ -1831,8 +1971,10 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; request_buf = malloc(req_buf_len); - if (NULL == request_buf) - ERROR_EXIT("ERROR, cannot allocate memory."); + if (NULL == request_buf) { + errorPrint("%s", "ERROR, cannot allocate memory.\n"); + exit(EXIT_FAILURE); + } char userpass_buf[INPUT_BUF_LEN]; int mod_table[] = {0, 2, 1}; @@ -2149,7 +2291,7 @@ static int calcRowLen(SSuperTable* superTbls) { static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int* childTblCountOfSuperTbl, int limit, int offset) { + int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) { char command[BUFFER_SIZE] = "\0"; char limitBuf[100] = "\0"; @@ -2160,7 +2302,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* childTblName = *childTblNameOfSuperTbl; if (offset >= 0) { - snprintf(limitBuf, 100, " limit %d offset %d", limit, offset); + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"", + limit, offset); } //get all child table name use cmd: select tbname from superTblName; @@ -2225,7 +2368,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int* childTblCountOfSuperTbl) { + int64_t* childTblCountOfSuperTbl) { return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, @@ -2313,8 +2456,10 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, 
return 0; } -static int createSuperTable(TAOS * taos, char* dbName, +static int createSuperTable( + TAOS * taos, char* dbName, SSuperTable* superTbl) { + char command[BUFFER_SIZE] = "\0"; char cols[STRING_LEN] = "\0"; @@ -2563,7 +2708,7 @@ static int createDatabasesAndStables() { printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); } - debugPrint("%s() %d supertbl count:%d\n", + debugPrint("%s() LN%d supertbl count:%"PRId64"\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); int validStbCount = 0; @@ -2622,14 +2767,15 @@ static void* createTable(void *sarg) int len = 0; int batchNum = 0; - verbosePrint("%s() LN%d: Creating table from %d to %d\n", + verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n", __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (int64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, - "create table if not exists %s.%s%d %s;", + "create table if not exists %s.%s%"PRId64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); @@ -2660,7 +2806,7 @@ static void* createTable(void *sarg) } len += snprintf(buffer + len, buff_len - len, - "if not exists %s.%s%d using %s.%s tags %s ", + "if not exists %s.%s%"PRId64" using %s.%s tags %s ", pThreadInfo->db_name, superTblInfo->childTblPrefix, i, pThreadInfo->db_name, superTblInfo->sTblName, tagsValBuf); @@ -2684,7 +2830,7 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %d - %d tables\n", + printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } @@ -2702,7 +2848,7 @@ static void* createTable(void *sarg) } static int startMultiThreadCreateChildTable( - char* cols, int threads, int startFrom, int ntables, + char* cols, int threads, int64_t startFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); @@ -2717,16 +2863,16 @@ static int startMultiThreadCreateChildTable( threads = 1; } - int a = ntables / threads; + int64_t a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - int b = 0; + int64_t b = 0; b = ntables % threads; - for (int i = 0; i < threads; i++) { + for (int64_t i = 0; i < threads; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); @@ -2752,7 +2898,7 @@ static int startMultiThreadCreateChildTable( startFrom = t_info->end_table_to + 1; t_info->use_metric = true; t_info->cols = cols; - t_info->minDelay = INT16_MAX; + t_info->minDelay = INT64_MAX; pthread_create(pids + i, NULL, createTable, t_info); } @@ -2803,24 +2949,22 @@ static void createChildTables() { } else { // normal table len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); - int j = 0; - while(g_args.datatype[j]) { + for (int j = 0; j < g_args.num_of_CPR; j++) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(60)", j, g_args.datatype[j]); + ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); } else { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ", COL%d %s", j, 
g_args.datatype[j]); } len = strlen(tblColsBuf); - j++; } snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", + verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); startMultiThreadCreateChildTable( @@ -2948,7 +3092,7 @@ static int readSampleFromCsvFileToMem( } if (readLen > superTblInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", + printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow); continue; } @@ -3215,9 +3359,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n", + printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3235,7 +3379,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (maxSqlLen && maxSqlLen->type == cJSON_Number) { g_args.max_sql_len = maxSqlLen->valueint; } else if (!maxSqlLen) { - g_args.max_sql_len = TSDB_PAYLOAD_SIZE; + g_args.max_sql_len = 1024000; } else { errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); @@ -3246,7 +3390,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = INT32_MAX; + g_args.num_of_RPR = INT64_MAX; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -3718,9 +3862,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n", i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3776,7 +3920,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %d.\n", + verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -3893,11 +4037,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { printf("ERROR: failed to read json, super_table_query not 
found\n"); goto PARSE_OVER; } else { - cJSON* rate = cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (rate && rate->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.rate = rate->valueint; - } else if (!rate) { - g_queryInfo.specifiedQueryInfo.rate = 0; + cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); + if (queryInterval && queryInterval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; + } else if (!queryInterval) { + g_queryInfo.specifiedQueryInfo.queryInterval = 0; } cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, @@ -3916,7 +4060,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; @@ -3925,12 +4069,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } - cJSON* queryMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (queryMode && queryMode->type == cJSON_String - && queryMode->valuestring != NULL) { - if (0 == strcmp("sync", queryMode->valuestring)) { + cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (mode && mode->type == cJSON_String + && mode->valuestring != NULL) { + if (0 == strcmp("sync", mode->valuestring)) { g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; - } else if (0 == strcmp("async", queryMode->valuestring)) { + } else if (0 == strcmp("async", mode->valuestring)) { g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; } else { errorPrint("%s() LN%d, failed to read json, query mode input error\n", @@ -4033,9 +4177,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else { cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.rate = subrate->valueint; + g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; } else if (!subrate) { - g_queryInfo.superQueryInfo.rate = 0; + g_queryInfo.superQueryInfo.queryInterval = 0; } cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); @@ -4281,8 +4425,9 @@ static void postFreeResource() { } } -static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int* sampleUsePos) { +static int getRowDataFromSample( + char* dataBuf, int64_t maxLen, int64_t timestamp, + SSuperTable* superTblInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { /* int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { @@ -4307,10 +4452,10 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, return dataLen; } -static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; +static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { + int64_t dataLen = 0; char *pstr = recBuf; - int maxLen = MAX_DATA_SIZE; + int64_t maxLen = MAX_DATA_SIZE; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); @@ -4377,7 +4522,7 @@ static int generateRowData(char* recBuf, int64_t timestamp, 
SSuperTable* stbInfo return strlen(recBuf); } -static int32_t generateData(char *recBuf, char **data_type, +static int64_t generateData(char *recBuf, char **data_type, int num_of_cols, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; @@ -4395,7 +4540,7 @@ static int32_t generateData(char *recBuf, char **data_type, exit(-1); } - for (int i = 0; i < num_of_cols; i++) { + for (int i = 0; i < c; i++) { if (strcasecmp(data_type[i % c], "tinyint") == 0) { pstr += sprintf(pstr, ", %d", rand_tinyint() ); } else if (strcasecmp(data_type[i % c], "smallint") == 0) { @@ -4417,7 +4562,7 @@ static int32_t generateData(char *recBuf, char **data_type, rand_string(s, lenOfBinary); pstr += sprintf(pstr, ", \"%s\"", s); free(s); - }else if (strcasecmp(data_type[i % c], "nchar") == 0) { + } else if (strcasecmp(data_type[i % c], "nchar") == 0) { char *s = malloc(lenOfBinary); rand_string(s, lenOfBinary); pstr += sprintf(pstr, ", \"%s\"", s); @@ -4443,7 +4588,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n", __func__, __LINE__, superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); @@ -4464,7 +4609,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { return 0; } -static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) +static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k) { int affectedRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4490,7 +4635,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) return affectedRows; } -static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) +static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; if (superTblInfo) { @@ -4501,7 +4646,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { - verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", + verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); @@ -4509,16 +4654,16 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%d", + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"", g_args.tb_prefix, tableSeq); } } -static int generateDataTail( +static int64_t generateDataTail( SSuperTable* superTblInfo, - int batch, char* buffer, int remainderBufLen, int64_t insertRows, - int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { - int len = 0; + int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, + int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { + int64_t len = 0; int ncols_per_record = 1; // count first col ts char *pstr = buffer; @@ -4531,14 +4676,14 @@ static int generateDataTail( } } - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); + verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch); - int k = 0; + int64_t k = 0; for 
(k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); - int retLen = 0; + int64_t retLen = 0; if (superTblInfo) { if (0 == strncasecmp(superTblInfo->dataSource, @@ -4552,16 +4697,16 @@ static int generateDataTail( } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int randTail = superTblInfo->timeStampStep * k; + int64_t randTail = superTblInfo->timeStampStep * k; if (superTblInfo->disorderRatio > 0) { int rand_num = taosRandom() % 100; if(rand_num < superTblInfo->disorderRatio) { randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %d\n", randTail); + debugPrint("rand data generated, back %"PRId64"\n", randTail); } } - uint64_t d = startTime + int64_t d = startTime + randTail; retLen = generateRowData( data, @@ -4581,14 +4726,15 @@ static int generateDataTail( char **data_type = g_args.datatype; int lenOfBinary = g_args.len_of_binary; - int rand_num = taosRandom() % 100; - int randTail; + int64_t randTail = DEFAULT_TIMESTAMP_STEP * k; - if ((g_args.disorderRatio != 0) - && (rand_num < g_args.disorderRatio)) { - randTail = (DEFAULT_TIMESTAMP_STEP * k - + (taosRandom() % g_args.disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %d\n", randTail); + if (g_args.disorderRatio != 0) { + int rand_num = taosRandom() % 100; + if (rand_num < g_args.disorderRatio) { + randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1); + + debugPrint("rand data generated, back %"PRId64"\n", randTail); + } } else { randTail = DEFAULT_TIMESTAMP_STEP * k; } @@ -4601,13 +4747,13 @@ static int generateDataTail( if (len > remainderBufLen) break; - pstr += sprintf(pstr, " %s", data); + pstr += sprintf(pstr, "%s", data); k++; len += retLen; remainderBufLen -= retLen; } - verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n", + verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); startFrom ++; @@ -4688,13 +4834,13 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return len; } -static int generateInterlaceDataBuffer( - char *tableName, int batchPerTbl, int i, int batchPerTblTimes, - int32_t tableSeq, +static int64_t generateInterlaceDataBuffer( + char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes, + int64_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, int64_t startTime, - int *pRemainderBufLen) + int64_t *pRemainderBufLen) { assert(buffer); char *pstr = buffer; @@ -4707,15 +4853,15 @@ static int generateInterlaceDataBuffer( return 0; } // generate data buffer - verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", + verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n", pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; *pRemainderBufLen -= headLen; - int dataLen = 0; + int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", + verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); @@ -4727,7 +4873,7 @@ static int generateInterlaceDataBuffer( startTime = 1500000000000; } - int k = generateDataTail( + int64_t k = generateDataTail( superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, @@ -4737,6 +4883,8 @@ static int generateInterlaceDataBuffer( pstr += dataLen; *pRemainderBufLen -= dataLen; } else { + debugPrint("%s() LN%d, generated data tail: %"PRId64", not 
equal batch per table: %"PRId64"\n", + __func__, __LINE__, k, batchPerTbl); pstr -= headLen; pstr[0] = '\0'; k = 0; @@ -4747,11 +4895,11 @@ static int generateInterlaceDataBuffer( static int generateProgressiveDataBuffer( char *tableName, - int32_t tableSeq, + int64_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos, - int *pRemainderBufLen) + int64_t startFrom, int64_t startTime, int64_t *pSamplePos, + int64_t *pRemainderBufLen) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4768,11 +4916,11 @@ static int generateProgressiveDataBuffer( assert(buffer != NULL); char *pstr = buffer; - int k = 0; + int64_t k = 0; memset(buffer, 0, *pRemainderBufLen); - int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, + int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, buffer, *pRemainderBufLen); if (headLen <= 0) { @@ -4781,7 +4929,7 @@ static int generateProgressiveDataBuffer( pstr += headLen; *pRemainderBufLen -= headLen; - int dataLen; + int64_t dataLen; k = generateDataTail(superTblInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, startTime, @@ -4794,10 +4942,27 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); + int64_t insertRows; + int64_t interlaceRows; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (superTblInfo) { + insertRows = superTblInfo->insertRows; + + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + } else { + insertRows = g_args.num_of_DPT; + interlaceRows = g_args.interlace_rows; + } + + if (interlaceRows > insertRows) + interlaceRows = insertRows; if (interlaceRows > g_args.num_of_RPR) interlaceRows = g_args.num_of_RPR; @@ -4813,10 +4978,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // TODO: prompt tbl count multple interlace rows and batch // - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", + errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n", __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4826,20 +4991,20 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; - int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; + int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - uint64_t st = 0; - uint64_t et = 0xffffffff; + int64_t st = 0; + int64_t et = 0xffffffff; int64_t lastPrintTime = taosGetTimestampMs(); - int64_t startTs = taosGetTimestampUs(); + int64_t startTs = taosGetTimestampMs(); int64_t endTs; - int tableSeq = pThreadInfo->start_table_from; + int64_t tableSeq = pThreadInfo->start_table_from; - debugPrint("[%d] %s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n", + 
debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); @@ -4847,12 +5012,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { assert(pThreadInfo->ntables > 0); - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; - - int batchPerTbl = interlaceRows; + int64_t batchPerTbl = interlaceRows; - int batchPerTblTimes; + int64_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = g_args.num_of_RPR / interlaceRows; @@ -4860,21 +5022,21 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { batchPerTblTimes = 1; } - int generatedRecPerTbl = 0; + int64_t generatedRecPerTbl = 0; bool flagSleep = true; - int sleepTimeTotal = 0; + int64_t sleepTimeTotal = 0; char *strInsertInto = "insert into "; int nInsertBufLen = strlen(strInsertInto); while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampUs(); + st = taosGetTimestampMs(); flagSleep = false; } // generate data memset(buffer, 0, maxSqlLen); - int remainderBufLen = maxSqlLen; + int64_t remainderBufLen = maxSqlLen; char *pstr = buffer; @@ -4882,9 +5044,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pstr += len; remainderBufLen -= len; - int recOfBatch = 0; + int64_t recOfBatch = 0; - for (int i = 0; i < batchPerTblTimes; i ++) { + for (int64_t i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", @@ -4893,8 +5055,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - int oldRemainderLen = remainderBufLen; - int generated = generateInterlaceDataBuffer( + int64_t oldRemainderLen = remainderBufLen; + int64_t generated = generateInterlaceDataBuffer( tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, pThreadInfo, pstr, @@ -4903,7 +5065,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { &remainderBufLen); if (generated < 0) { - debugPrint("[%d] %s() LN%d, generated data is %d\n", + debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_and_statistics_interlace; } else if (generated == 0) { @@ -4915,7 +5077,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pstr += (oldRemainderLen - remainderBufLen); // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + verbosePrint("[%d] %s() LN%d batchPerTbl=%"PRId64" recOfBatch=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); @@ -4932,16 +5094,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (generatedRecPerTbl >= insertRows) break; + int remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) break; } } - int remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, 
generatedRecPerTbl, insertRows); @@ -4949,30 +5111,31 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { break; } - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n", + verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, pThreadInfo->totalInsertRows); verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, __func__, __LINE__, buffer); - startTs = taosGetTimestampUs(); + startTs = taosGetTimestampMs(); - int affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); + int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); - endTs = taosGetTimestampUs(); + endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.6fms\n", - __func__, __LINE__, delay/1000.0); + performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", + __func__, __LINE__, delay); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; pThreadInfo->cntDelay++; pThreadInfo->totalDelay += delay; - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, affectedRows); if ((affectedRows < 0) || (recOfBatch != affectedRows)) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n", + errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, affectedRows, buffer); goto free_and_statistics_interlace; @@ -4990,10 +5153,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } if ((insert_interval) && flagSleep) { - et = taosGetTimestampUs(); + et = taosGetTimestampMs(); - if (insert_interval > ((et - st)/1000) ) { - int sleepTime = insert_interval - (et -st)/1000; + if (insert_interval > (et - st) ) { + int sleepTime = insert_interval - (et -st); performancePrint("%s() LN%d sleep: %d ms for insert interval\n", __func__, __LINE__, sleepTime); taosMsleep(sleepTime); // ms @@ -5035,10 +5198,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } int64_t lastPrintTime = taosGetTimestampMs(); - int64_t startTs = taosGetTimestampUs(); + int64_t startTs = taosGetTimestampMs(); int64_t endTs; - int timeStampStep = + int64_t timeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; /* int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; @@ -5051,7 +5214,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; - for (uint32_t tableSeq = + for (int64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { int64_t start_time = pThreadInfo->start_time; @@ -5062,17 +5225,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { for (int64_t i = 0; i < insertRows;) { /* if (insert_interval) { - st = taosGetTimestampUs(); + st = taosGetTimestampMs(); } */ char tableName[TSDB_TABLE_NAME_LEN]; getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%d tableName=%s\n", + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); - int remainderBufLen = maxSqlLen; + int64_t remainderBufLen = maxSqlLen; char *pstr = buffer; int nInsertBufLen = strlen("insert into "); @@ -5094,14 
+5257,14 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { start_time += generated * timeStampStep; pThreadInfo->totalInsertRows += generated; - startTs = taosGetTimestampUs(); + startTs = taosGetTimestampMs(); - int affectedRows = execInsert(pThreadInfo, buffer, generated); + int64_t affectedRows = execInsert(pThreadInfo, buffer, generated); - endTs = taosGetTimestampUs(); + endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.6fms\n", - __func__, __LINE__, delay/1000.0); + performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", + __func__, __LINE__, delay); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; @@ -5126,10 +5289,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { break; /* if (insert_interval) { - et = taosGetTimestampUs(); + et = taosGetTimestampMs(); - if (insert_interval > ((et - st)/1000) ) { - int sleep_time = insert_interval - (et -st)/1000; + if (insert_interval > ((et - st)) ) { + int sleep_time = insert_interval - (et -st); performancePrint("%s() LN%d sleep: %d ms for insert interval\n", __func__, __LINE__, sleep_time); taosMsleep(sleep_time); // ms @@ -5142,7 +5305,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && (0 == strncasecmp( superTblInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%d\n", + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); } } @@ -5163,7 +5326,18 @@ static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + int interlaceRows; + + if (superTblInfo) { + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + } else { + interlaceRows = g_args.interlace_rows; + } if (interlaceRows > 0) { // interlace mode @@ -5181,16 +5355,17 @@ static void callBack(void *param, TAOS_RES *res, int code) { int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - pThreadInfo->et = taosGetTimestampUs(); - if (((pThreadInfo->et - pThreadInfo->st)/1000) < insert_interval) { - taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)/1000); // ms + pThreadInfo->et = taosGetTimestampMs(); + if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { + taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms } } char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix, + pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", + pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { @@ -5222,7 +5397,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { } if (insert_interval) { - pThreadInfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampMs(); } taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); free(buffer); @@ -5241,7 +5416,7 
@@ static void *asyncWrite(void *sarg) { int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - pThreadInfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampMs(); } taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); @@ -5300,7 +5475,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, start_time = 1500000000000; } - double start = getCurrentTimeUs(); + int64_t start = taosGetTimestampMs(); // read sample data from file first if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, @@ -5337,9 +5512,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (superTblInfo) { int limit, offset; - if ((superTblInfo->childTblExists == TBL_NO_EXISTS) && + if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) && ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { - printf("WARNING: offset and limit will not be used since the child tables are not exists!\n"); + printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) @@ -5388,7 +5563,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - int childTblCount; + int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos, db_name, superTblInfo->sTblName, @@ -5421,7 +5596,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->superTblInfo = superTblInfo; t_info->start_time = start_time; - t_info->minDelay = INT16_MAX; + t_info->minDelay = INT64_MAX; if ((NULL == superTblInfo) || (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { @@ -5439,18 +5614,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->taos = NULL; } - if ((NULL == superTblInfo) +/* if ((NULL == superTblInfo) || (0 == superTblInfo->multiThreadWriteOneTbl)) { + */ t_info->start_table_from = startFrom; t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; - } else { +/* } else { t_info->start_table_from = 0; t_info->ntables = superTblInfo->childTblCount; t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); } - +*/ tsem_init(&(t_info->lock_sem), 0, 0); if (SYNC == g_Dbs.queryMode) { pthread_create(pids + i, NULL, syncWrite, t_info); @@ -5465,7 +5641,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t totalDelay = 0; int64_t maxDelay = 0; - int64_t minDelay = INT16_MAX; + int64_t minDelay = INT64_MAX; int64_t cntDelay = 1; double avgDelay = 0; @@ -5497,39 +5673,39 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (cntDelay == 0) cntDelay = 1; avgDelay = (double)totalDelay / cntDelay; - double end = getCurrentTimeUs(); - double t = end - start; + int64_t end = taosGetTimestampMs(); + int64_t t = end - start; if (superTblInfo) { - printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", - t, superTblInfo->totalInsertRows, + printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. 
%2.f records/second\n\n", + t / 1000.0, superTblInfo->totalInsertRows, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName, - superTblInfo->totalInsertRows / t); + (double)superTblInfo->totalInsertRows / (t / 1000.0)); fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", - t, superTblInfo->totalInsertRows, + "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", + t / 1000.0, superTblInfo->totalInsertRows, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName, - superTblInfo->totalInsertRows/ t); + (double)superTblInfo->totalInsertRows / (t / 1000.0)); } else { - printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", - t, g_args.totalInsertRows, + printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", + t / 1000.0, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, - g_args.totalInsertRows / t); + (double)g_args.totalInsertRows / (t / 1000.0)); fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", - t, g_args.totalInsertRows, + "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", + t * 1000.0, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, - g_args.totalInsertRows / t); + (double)g_args.totalInsertRows / (t / 1000.0)); } - printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); - fprintf(g_fpOfInsertResult, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); + printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", + avgDelay, maxDelay, minDelay); + fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", + avgDelay, maxDelay, minDelay); //taos_close(taos); @@ -5576,7 +5752,7 @@ static void *readTable(void *sarg) { sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); - double t = getCurrentTimeUs(); + double t = taosGetTimestampMs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -5592,7 +5768,7 @@ static void *readTable(void *sarg) { count++; } - t = getCurrentTimeUs() - t; + t = taosGetTimestampMs() - t; totalT += t; taos_free_result(pSql); @@ -5601,7 +5777,7 @@ static void *readTable(void *sarg) { fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n", aggreFunc[j][0] == '*' ? 
" * " : aggreFunc[j], totalData, (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000); } fprintf(fp, "\n"); fclose(fp); @@ -5651,7 +5827,7 @@ static void *readMetric(void *sarg) { printf("Where condition: %s\n", condition); fprintf(fp, "%s\n", command); - double t = getCurrentTimeUs(); + double t = taosGetTimestampMs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -5667,11 +5843,11 @@ static void *readMetric(void *sarg) { while(taos_fetch_row(pSql) != NULL) { count++; } - t = getCurrentTimeUs() - t; + t = taosGetTimestampMs() - t; fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - num_of_tables * num_of_DPT / t, t * 1000); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t); + num_of_tables * num_of_DPT / (t * 1000.0), t); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t * 1000.0); taos_free_result(pSql); } @@ -5721,21 +5897,21 @@ static int insertTestProcess() { double end; // create child tables - start = getCurrentTimeUs(); + start = taosGetTimestampMs(); createChildTables(); - end = getCurrentTimeUs(); + end = taosGetTimestampMs(); if (g_totalChildTables > 0) { printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", - end - start, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", - end - start, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); } taosMsleep(1000); // create sub threads for inserting data - //start = getCurrentTimeUs(); + //start = taosGetTimestampMs(); for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.use_metric) { if (g_Dbs.db[i].superTblCount > 0) { @@ -5760,7 +5936,7 @@ static int insertTestProcess() { NULL); } } - //end = getCurrentTimeUs(); + //end = taosGetTimestampMs(); //int64_t totalInsertRows = 0; //int64_t totalAffectedRows = 0; @@ -5813,16 +5989,15 @@ static void *specifiedTableQuery(void *sarg) { int64_t startTs = taosGetTimestampMs(); while(queryTimes --) { - if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < - (int64_t)g_queryInfo.specifiedQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); + if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < + (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } - st = taosGetTimestampUs(); + st = taosGetTimestampMs(); if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); + int64_t t1 = taosGetTimestampMs(); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(tmpFile, "%s-%d", @@ -5831,11 +6006,11 @@ static void *specifiedTableQuery(void *sarg) { } selectAndGetResult(pThreadInfo->taos, g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + 
int64_t t2 = taosGetTimestampMs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000.0); } else { - int64_t t1 = taosGetTimestampUs(); + int64_t t1 = taosGetTimestampMs(); int retCode = postProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); @@ -5843,27 +6018,27 @@ static void *specifiedTableQuery(void *sarg) { printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); return NULL; } - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + int64_t t2 = taosGetTimestampMs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000.0); } totalQueried ++; g_queryInfo.specifiedQueryInfo.totalQueried ++; - et = taosGetTimestampUs(); - printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", - taosGetSelfPthreadId(), (double)(et - st)/1000.0); + et = taosGetTimestampMs(); int64_t currentPrintTime = taosGetTimestampMs(); int64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n", + __func__, __LINE__, endTs, startTs); + printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n", pThreadInfo->threadID, totalQueried, - totalQueried/((endTs-startTs)/1000.0)); + (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; } - lastPrintTime = currentPrintTime; } return NULL; } @@ -5911,7 +6086,7 @@ static void *superTableQuery(void *sarg) { } int64_t st = 0; - int64_t et = (int64_t)g_queryInfo.superQueryInfo.rate*1000; + int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; int queryTimes = g_queryInfo.superQueryInfo.queryTimes; int totalQueried = 0; @@ -5919,13 +6094,13 @@ static void *superTableQuery(void *sarg) { int64_t lastPrintTime = taosGetTimestampMs(); while(queryTimes --) { - if (g_queryInfo.superQueryInfo.rate - && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + if (g_queryInfo.superQueryInfo.queryInterval + && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } - st = taosGetTimestampUs(); + st = taosGetTimestampMs(); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); @@ -5944,20 +6119,20 @@ static void *superTableQuery(void *sarg) { int64_t currentPrintTime = taosGetTimestampMs(); int64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n", pThreadInfo->threadID, totalQueried, - totalQueried/((endTs-startTs)/1000.0)); + (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; } - lastPrintTime = currentPrintTime; } } - et = taosGetTimestampUs(); - printf("####thread[%"PRId64"] 
complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", + et = taosGetTimestampMs(); + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), pThreadInfo->start_table_from, pThreadInfo->end_table_to, - (double)(et - st)/1000000.0); + (double)(et - st)/1000.0); } return NULL; @@ -6118,9 +6293,9 @@ static int queryTestProcess() { int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + g_queryInfo.superQueryInfo.totalQueried; - printf("==== completed total queries: %d, the QPS of all threads: %10.2f====\n", + printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n", totalQueried, - totalQueried/((endTs-startTs)/1000.0)); + (double)(totalQueried/((endTs-startTs)/1000.0))); return 0; } @@ -6190,8 +6365,8 @@ static void *superSubscribe(void *sarg) { //int64_t st = 0; //int64_t et = 0; do { - //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms + //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) { + // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} @@ -6277,8 +6452,8 @@ static void *specifiedSubscribe(void *sarg) { //int64_t st = 0; //int64_t et = 0; do { - //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms + //if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) { + // taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} @@ -6293,7 +6468,7 @@ static void *specifiedSubscribe(void *sarg) { } tsub[i] = subscribeImpl(pThreadInfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { + if (NULL == tsub[i]) { taos_close(pThreadInfo->taos); return NULL; } @@ -6369,7 +6544,7 @@ static int subscribeTestProcess() { //==== create sub threads for query from super table if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) || (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) { - errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); exit(-1); @@ -6628,7 +6803,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) char * line = NULL; size_t line_len = 0; - double t = getCurrentTimeUs(); + double t = taosGetTimestampMs(); while((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; @@ -6659,7 +6834,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) cmd_len = 0; } - t = getCurrentTimeUs() - t; + t = taosGetTimestampMs() - t; printf("run %s took %.6f second(s)\n\n", sqlFile, t); tmfree(cmd); diff --git a/src/kit/taosdump/taosdump.c 
b/src/kit/taosdump/taosdump.c index 092374cef1e89f092e85fda6832e63f037f3a814..96a1cd16f8f724efbee8780a72bcfadea4121be1 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -214,8 +214,8 @@ static struct argp_option options[] = { // dump format options {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, - {"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, @@ -482,7 +482,8 @@ static int queryDbImpl(TAOS *taos, char *command) { static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-E") == 0) { + if ((strcmp(argv[i], "-S") == 0) + || (strcmp(argv[i], "-E") == 0)) { if (argv[i+1]) { char *tmp = strdup(argv[++i]); @@ -509,7 +510,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(-1); } } else { - errorPrint("%s() LN%d, -E need a valid value following!\n", __func__, __LINE__); + errorPrint("%s need a valid value following!\n", argv[i]); exit(-1); } } else if (strcmp(argv[i], "-g") == 0) { @@ -522,7 +523,8 @@ int main(int argc, char *argv[]) { /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - parse_args(argc, argv, &g_args); + if (argc > 1) + parse_args(argc, argv, &g_args); argp_parse(&argp, argc, argv, 0, 0, &g_args); diff --git a/src/mnode/inc/mnodeDb.h b/src/mnode/inc/mnodeDb.h index d03ba8d717a13cc943f2494dc0dca0d59d108b6f..da0865833db29fc41249791c85a937d82d450124 100644 --- a/src/mnode/inc/mnodeDb.h +++ b/src/mnode/inc/mnodeDb.h @@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS { int32_t mnodeInitDbs(); void mnodeCleanupDbs(); int64_t mnodeGetDbNum(); +int32_t mnodeGetDbMaxReplica(); SDbObj *mnodeGetDb(char *db); SDbObj *mnodeGetDbByTableName(char *db); void * mnodeGetNextDb(void *pIter, SDbObj **pDb); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 909ca7cac6ec04af7d6c624a8d7fea06e7cad702..8af20aa862d3b126cfd3332e42423e7b08414b63 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() { return sdbGetNumOfRows(tsDbSdb); } +int32_t mnodeGetDbMaxReplica() { + int32_t maxReplica = 0; + SDbObj *pDb = NULL; + void *pIter = NULL; + + while (1) { + pIter = mnodeGetNextDb(pIter, &pDb); + if (pDb == NULL) break; + + if (pDb->cfg.replications > maxReplica) + maxReplica = pDb->cfg.replications; + + mnodeDecDbRef(pDb); + } + + return maxReplica; +} + static int32_t mnodeDbActionInsert(SSdbRow *pRow) { SDbObj *pDb = pRow->pObj; SAcctObj *pAcct = mnodeGetAcct(pDb->acct); diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 85d9f94b884475cbe3cd2efb0c0d662e86f86f15..b513da29f44fce09d40dfe58aea63acc192f2178 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -29,6 +29,7 @@ #include "mnodeDef.h" #include "mnodeInt.h" #include "mnodeDnode.h" +#include "mnodeDb.h" #include "mnodeMnode.h" #include "mnodeSdb.h" #include "mnodeShow.h" @@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { return TSDB_CODE_MND_NO_REMOVE_MASTER; } + int32_t maxReplica = mnodeGetDbMaxReplica(); + int32_t dnodesNum = mnodeGetDnodesNum(); + if (dnodesNum <= maxReplica) { + mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replia: %d", pDnode->dnodeId, ep, dnodesNum, maxReplica); + mnodeDecDnodeRef(pDnode); + return TSDB_CODE_MND_NO_ENOUGH_DNODES; + } + mInfo("dnode:%d, start to drop it", pDnode->dnodeId); int32_t code = bnDropDnode(pDnode); diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 17a4282d05935684df4ab6585fef5f2398a62979..459d981138f05fde00c11f71e0b75048f2fc360d 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -344,7 +344,7 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->bytes[cols] = 24; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "qhandle"); + strcpy(pSchema[cols].name, "qid"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -420,7 +420,7 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v cols++; char handleBuf[24] = {0}; - snprintf(handleBuf, tListLen(handleBuf), "%p", (void*)htobe64(pDesc->qHandle)); + snprintf(handleBuf, tListLen(handleBuf), "%"PRIu64, htobe64(pDesc->qId)); pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; STR_WITH_MAXSIZE_TO_VARSTR(pWrite, handleBuf, pShow->bytes[cols]); diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 8312b74a5015aeb0991b303f0ea8f207cb2cf3eb..c3e02b14db31a8d5687de2e3b7368d2d5e1626eb 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -32,6 +32,10 @@ extern "C" { #include "osArm32.h" #endif +#ifdef _TD_MIPS_64 
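The mnode hunks above add mnodeGetDbMaxReplica(), which walks every database for its largest replications setting, and mnodeDropDnodeByEp() now rejects the drop with TSDB_CODE_MND_NO_ENOUGH_DNODES whenever the current dnode count does not exceed that maximum. A self-contained sketch of the same rule, using stand-in data and helper names (maxReplicaOf, canDropDnode) instead of the real sdb iterators:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for mnodeGetDbMaxReplica(): highest cfg.replications over all DBs */
static int maxReplicaOf(const int *replications, int dbCount) {
    int maxReplica = 0;
    for (int i = 0; i < dbCount; i++) {
        if (replications[i] > maxReplica) maxReplica = replications[i];
    }
    return maxReplica;
}

/* the drop is rejected unless at least maxReplica dnodes would remain */
static bool canDropDnode(int dnodesNum, int maxReplica) {
    return dnodesNum > maxReplica;
}

int main(void) {
    int replications[] = {1, 3, 2};
    int maxReplica = maxReplicaOf(replications, 3);
    printf("3 dnodes, max replica %d -> drop allowed: %d\n",
           maxReplica, canDropDnode(3, maxReplica));   /* 0: replica-3 DB blocks it */
    printf("4 dnodes, max replica %d -> drop allowed: %d\n",
           maxReplica, canDropDnode(4, maxReplica));   /* 1: one spare dnode left */
    return 0;
}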
+#include "osMips64.h" +#endif + #ifdef _TD_LINUX_64 #include "osLinux64.h" #endif diff --git a/src/os/inc/osMips64.h b/src/os/inc/osMips64.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7b08a31116d20a8eaaaacfa3a9a64ea75dbcee --- /dev/null +++ b/src/os/inc/osMips64.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_OS_MIPS64_H +#define TDENGINE_OS_MIPS64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/src/darwin/darwinSysInfo.c b/src/os/src/darwin/darwinSysInfo.c index 55c07766b307f57fbc8fd76798ac880efef8a4de..6e70043779d01be361f66a67b16608b56a9285ed 100644 --- a/src/os/src/darwin/darwinSysInfo.c +++ b/src/os/src/darwin/darwinSysInfo.c @@ -217,7 +217,7 @@ void taosSetCoreDump() {} int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { struct statvfs info; - if (statvfs(tsDataDir, &info)) { + if (statvfs(dataDir, &info)) { uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index c0d46878a821db3490ccf54f8e303e37e52f84aa..bbda08aa25cdf6adb91a53157d8cd6be291d9381 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -319,7 +319,7 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { struct statvfs info; - if (statvfs(tsDataDir, &info)) { + if (statvfs(dataDir, &info)) { uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; diff --git a/src/os/tests/test.cpp b/src/os/tests/test.cpp index 600e5d71a760653f8a932985f0d14a60081575b6..12e9546ff184589a139de8dfd8a5f8a9a392a7b3 100644 --- a/src/os/tests/test.cpp +++ b/src/os/tests/test.cpp @@ -18,7 +18,7 @@ TEST(testCase, parse_time) { deltaToUtcInitOnce(); // window: 1500000001000, 1500002000000 - // pQuery->interval: interval: 86400000, sliding:3600000 + // pQueryAttr->interval: interval: 86400000, sliding:3600000 int64_t key = 1500000001000; SInterval interval = {0}; interval.interval = 86400000; diff --git a/src/plugins/http/inc/httpContext.h b/src/plugins/http/inc/httpContext.h index b016da2dd3a75bc4e51f29e3f9344458d11badea..af52fdd1ebbfe70fee3f278f94aa95faa1f4caba 100644 --- 
a/src/plugins/http/inc/httpContext.h +++ b/src/plugins/http/inc/httpContext.h @@ -25,7 +25,7 @@ const char *httpContextStateStr(HttpContextState state); HttpContext *httpCreateContext(SOCKET fd); bool httpInitContext(HttpContext *pContext); HttpContext *httpGetContext(void * pContext); -void httpReleaseContext(HttpContext *pContext, bool clearRes); +void httpReleaseContext(HttpContext *pContext/*, bool clearRes*/); void httpCloseContextByServer(HttpContext *pContext); void httpCloseContextByApp(HttpContext *pContext); void httpNotifyContextClose(HttpContext *pContext); diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 13f706af653e9c5c1b9fb0a4c602355b001d0cac..51adef11b9af3ebb83537024edbb3ba369aaeb03 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -146,20 +146,20 @@ HttpContext *httpGetContext(void *ptr) { return NULL; } -void httpReleaseContext(HttpContext *pContext, bool clearRes) { +void httpReleaseContext(HttpContext *pContext/*, bool clearRes*/) { int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1); if (refCount < 0) { httpError("context:%p, is already released, refCount:%d", pContext, refCount); return; } - + /* if (clearRes) { if (pContext->parser) { httpClearParser(pContext->parser); } memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd)); } - + */ HttpContext **ppContext = pContext->ppContext; httpTrace("context:%p, is released, data:%p refCount:%d", pContext, ppContext, refCount); @@ -217,7 +217,7 @@ void httpCloseContextByApp(HttpContext *pContext) { httpContextStateStr(pContext->state), pContext->state); } - httpReleaseContext(pContext, true); + httpReleaseContext(pContext/*, true*/); } void httpCloseContextByServer(HttpContext *pContext) { @@ -235,5 +235,5 @@ void httpCloseContextByServer(HttpContext *pContext) { pContext->parsed = false; httpRemoveContextFromEpoll(pContext); - httpReleaseContext(pContext, true); + httpReleaseContext(pContext/*, true*/); } diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index ad79e24061d251351bc73790ac341c9dc31b29b6..d51c774ff269d5790868727941a632d133dd6733 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -50,6 +50,7 @@ bool httpProcessData(HttpContext* pContext) { */ // httpCloseContextByApp(pContext); } else { + httpClearParser(pContext->parser); httpProcessRequest(pContext); } } diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index a5f40fdc4cf89f6322d310ca2093336862a9045d..4dcf3d550177ff6747a2af1c0c9ff561a3b0dd29 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -177,7 +177,7 @@ static void httpProcessHttpData(void *param) { if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) { httpDebug("context:%p, fd:%d, state:%s, not in ready state, ignore read events", pContext, pContext->fd, httpContextStateStr(pContext->state)); - httpReleaseContext(pContext, true); + httpReleaseContext(pContext/*, true*/); continue; } @@ -191,7 +191,7 @@ static void httpProcessHttpData(void *param) { (*(pThread->processData))(pContext); atomic_fetch_add_32(&pServer->requestNum, 1); } else { - httpReleaseContext(pContext, false); + httpReleaseContext(pContext/*, false*/); } } } @@ -275,7 +275,7 @@ static void *httpAcceptHttpConnection(void *arg) { httpError("context:%p, fd:%d, ip:%s, thread:%s, failed to add http fd for epoll, error:%s", pContext, connFd, pContext->ipstr, pThread->label, 
strerror(errno)); taosCloseSocket(pContext->fd); - httpReleaseContext(pContext, true); + httpReleaseContext(pContext/*, true*/); continue; } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 4e9b54b7bd2b25b2a8bbdd29e6d54ae314b7519d..b345c1531f1f6904470a2e9dbae361834ef32fff 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -376,6 +376,8 @@ void httpExecCmd(HttpContext *pContext) { httpCloseContextByApp(pContext); break; } + + memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd)); } void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) { diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index c59067c4b357aa084cd6f31c39f3038075d18a82..bdccd4eb3c0da8b54bd0c44589146750c8eddee7 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -122,7 +122,7 @@ enum { #define QUERY_IS_FREE_RESOURCE(type) (((type)&TSDB_QUERY_TYPE_FREE_RESOURCE) != 0) typedef struct SArithmeticSupport { - SExprInfo *pArithExpr; + SExprInfo *pExprInfo; int32_t numOfCols; SColumnInfo *colList; void *exprList; // client side used @@ -210,9 +210,6 @@ typedef struct SAggFunctionInfo { void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function - // some sql function require scan data twice or more, e.g.,stddev, percentile - void (*xNextStep)(SQLFunctionCtx *pCtx); - // finalizer must be called after all xFunction has been executed to generated final result. void (*xFinalize)(SQLFunctionCtx *pCtx); void (*mergeFunc)(SQLFunctionCtx *pCtx); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 31ac70d6cdcacfe4271fd69e94369ca71237799b..b9361650e99a0531033cf4f680860730266f68f3 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -86,7 +86,7 @@ typedef struct SResultRow { bool closed; // this result status: closed or opened uint32_t numOfRows; // number of rows of current time window SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo - STimeWindow win; + STimeWindow win; char* key; // start key of current result row } SResultRow; @@ -178,8 +178,11 @@ typedef struct SSDataBlock { SDataBlockInfo info; } SSDataBlock; -typedef struct SQuery { +// The basic query information extracted from the SQueryInfo tree to support the +// execution of query in a data node. +typedef struct SQueryAttr { SLimitVal limit; + SLimitVal slimit; bool stableQuery; // super table query or not bool topBotQuery; // TODO used bitwise flag @@ -188,6 +191,11 @@ typedef struct SQuery { bool timeWindowInterpo;// if the time window start/end required interpolation bool queryBlockDist; // if query data block distribution bool stabledev; // super table stddev query + bool tsCompQuery; // is tscomp query + bool simpleAgg; + bool pointInterpQuery; // point interpolation query + bool needReverseScan; // need reverse scan + bool distinctTag; // distinct tag query int32_t interBufSize; // intermediate buffer sizse int32_t havingNum; // having expr number @@ -202,39 +210,41 @@ typedef struct SQuery { int16_t precision; int16_t numOfOutput; int16_t fillType; - int16_t checkResultBuf; // check if the buffer is full during scan each block int32_t srcRowSize; // todo extract struct int32_t resultRowSize; int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query. 
- int32_t maxSrcColumnSize; + int32_t maxTableColumnWidth; int32_t tagLen; // tag value length of current query SSqlGroupbyExpr* pGroupbyExpr; + SExprInfo* pExpr1; SExprInfo* pExpr2; int32_t numOfExpr2; - SColumnInfo* colList; + SExprInfo* pExpr3; + int32_t numOfExpr3; + + SColumnInfo* tableCols; SColumnInfo* tagColList; int32_t numOfFilterCols; int64_t* fillVal; SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query. SSingleColumnFilterInfo* pFilterInfo; - STableQueryInfo* current; void* tsdb; SMemRef memRef; STableGroupInfo tableGroupInfo; // table list SArray int32_t vgId; -} SQuery; +} SQueryAttr; -typedef SSDataBlock* (*__operator_fn_t)(void* param); +typedef SSDataBlock* (*__operator_fn_t)(void* param, bool* newgroup); typedef void (*__optr_cleanup_fn_t)(void* param, int32_t num); struct SOperatorInfo; typedef struct SQueryRuntimeEnv { jmp_buf env; - SQuery* pQuery; + SQueryAttr* pQueryAttr; uint32_t status; // query status void* qinfo; uint8_t scanFlag; // denotes reversed scan of data or not @@ -257,10 +267,10 @@ typedef struct SQueryRuntimeEnv { SSDataBlock *outputBuf; STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure struct SOperatorInfo *proot; - struct SOperatorInfo *pTableScanner; // table scan operator SGroupResInfo groupResInfo; int64_t currentOffset; // dynamic offset value + STableQueryInfo *current; SRspResultInfo resultInfo; SHashObj *pTableRetrieveTsMap; } SQueryRuntimeEnv; @@ -281,13 +291,17 @@ enum OPERATOR_TYPE_E { OP_Arithmetic = 7, OP_Groupby = 8, OP_Limit = 9, - OP_Offset = 10, + OP_SLimit = 10, OP_TimeWindow = 11, OP_SessionWindow = 12, OP_Fill = 13, OP_MultiTableAggregate = 14, OP_MultiTableTimeInterval = 15, - OP_Having = 16, + OP_DummyInput = 16, //TODO remove it after fully refactor. + OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream. + OP_GlobalAggregate = 18, // global merge for the multi-way data sources. 
+ OP_Filter = 19, + OP_Distinct = 20, }; typedef struct SOperatorInfo { @@ -310,6 +324,12 @@ enum { QUERY_RESULT_READY = 2, }; +typedef struct { + int32_t numOfTags; + int32_t numOfCols; + SColumnInfo *colList; +} SQueriedTableInfo; + typedef struct SQInfo { void* signature; uint64_t qId; @@ -317,7 +337,7 @@ typedef struct SQInfo { int64_t owner; // if it is in execution SQueryRuntimeEnv runtimeEnv; - SQuery query; + SQueryAttr query; void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables; pthread_mutex_t lock; // used to synchronize the rsp/query threads @@ -335,14 +355,16 @@ typedef struct SQueryParam { char *tbnameCond; char *prevResult; SArray *pTableIdList; - SSqlFuncMsg **pExprMsg; - SSqlFuncMsg **pSecExprMsg; + SSqlExpr **pExpr; + SSqlExpr **pSecExpr; SExprInfo *pExprs; SExprInfo *pSecExprs; SColIndex *pGroupColIndex; SColumnInfo *pTagColumnInfo; SSqlGroupbyExpr *pGroupbyExpr; + int32_t tableScanOperator; + SArray *pOperator; } SQueryParam; typedef struct STableScanInfo { @@ -394,26 +416,39 @@ typedef struct SArithOperatorInfo { SOptrBasicInfo binfo; int32_t bufCapacity; uint32_t seed; + + SSDataBlock *existDataBlock; } SArithOperatorInfo; typedef struct SLimitOperatorInfo { - int64_t limit; - int64_t total; + int64_t limit; + int64_t total; } SLimitOperatorInfo; -typedef struct SOffsetOperatorInfo { - int64_t offset; -} SOffsetOperatorInfo; +typedef struct SSLimitOperatorInfo { + int64_t groupTotal; + int64_t currentGroupOffset; + + int64_t rowsTotal; + int64_t currentOffset; + SLimitVal limit; + SLimitVal slimit; -typedef struct SHavingOperatorInfo { - SArray* fp; -} SHavingOperatorInfo; + char **prevRow; + SArray *orderColumnList; +} SSLimitOperatorInfo; +typedef struct SFilterOperatorInfo { + SSingleColumnFilterInfo *pFilterInfo; + int32_t numOfFilterCols; +} SFilterOperatorInfo; typedef struct SFillOperatorInfo { SFillInfo *pFillInfo; SSDataBlock *pRes; int64_t totalInputRows; + + SSDataBlock *existNewGroupBlock; } SFillOperatorInfo; typedef struct SGroupbyOperatorInfo { @@ -430,25 +465,95 @@ typedef struct SSWindowOperatorInfo { int32_t start; // start row index } SSWindowOperatorInfo; +typedef struct SDistinctOperatorInfo { + SHashObj *pSet; + SSDataBlock *pRes; + bool recordNullVal; //has already record the null value, no need to try again + int64_t threshold; + int64_t outputCapacity; +} SDistinctOperatorInfo; + +struct SLocalMerger; + +typedef struct SMultiwayMergeInfo { + struct SLocalMerger *pMerge; + SOptrBasicInfo binfo; + int32_t bufCapacity; + int64_t seed; + char **prevRow; + SArray *orderColumnList; + int32_t resultRowFactor; + + bool hasGroupColData; + char **currentGroupColData; + SArray *groupColumnList; + bool hasDataBlockForNewGroup; + SSDataBlock *pExistBlock; + + bool hasPrev; + bool groupMix; +} SMultiwayMergeInfo; + +SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); +SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime); +SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); + +SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* 
upstream); +SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); +SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput, + int32_t numOfRows, void* merger, bool groupMix); +SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param); +SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger); +SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); + +SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); +SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); +SSDataBlock* doSLimit(void* param, bool* newgroup); + +SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows); +void* destroyOutputBuf(SSDataBlock* pBlock); + +void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order); +int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput); +void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset); +void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows); + void freeParam(SQueryParam *param); int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); -int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, - SColumnInfo* pTagCols); +int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo, + SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg); + int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, - SSqlFuncMsg **pExprMsg, SExprInfo *prevExpr); + SSqlExpr **pExpr, SExprInfo *prevExpr); SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* 
sql, uint64_t *qId);
-int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable);
+ SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId);
+
+int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start,
+ int32_t prevResultLen, void* merger);
+
 void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
+STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf);
+int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);
+
 bool isQueryKilled(SQInfo *pQInfo);
 int32_t checkForQueryBuf(size_t numOfTables);
 bool doBuildResCheck(SQInfo* pQInfo);
 void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status);
-bool onlyQueryTags(SQuery* pQuery);
+bool onlyQueryTags(SQueryAttr* pQueryAttr);
 bool isValidQInfo(void *param);
 
 int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
@@ -457,6 +562,7 @@ size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
 void setQueryKilled(SQInfo *pQInfo);
 void queryCostStatis(SQInfo *pQInfo);
 void freeQInfo(SQInfo *pQInfo);
+void freeQueryAttr(SQueryAttr *pQuery);
 
 int32_t getMaximumIdleDurationSec();
 
diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h
index df6e64ddd85c4ec6be693f262ea561ee23f3bf0b..b851fbb3e076db76a94b8542b83e6f3dc073f3f3 100644
--- a/src/query/inc/qExtbuffer.h
+++ b/src/query/inc/qExtbuffer.h
@@ -237,6 +237,11 @@ int32_t compare_a(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1
 int32_t compare_d(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1, int32_t numOfRow2, int32_t s2, char *data2);
 
+struct SSDataBlock;
+int32_t compare_aRv(struct SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order);
+
+int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/src/query/inc/qPlan.h b/src/query/inc/qPlan.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f35565e4bccd4896ec49ddb30f7236ac4b4650c
--- /dev/null
+++ b/src/query/inc/qPlan.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef TDENGINE_QPLAN_H +#define TDENGINE_QPLAN_H + +//TODO refactor +SArray* createTableScanPlan(SQueryAttr* pQueryAttr); +SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr); +SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr); + +#endif // TDENGINE_QPLAN_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index c5ee172c40843674a8b167699edb9e30d15275a8..85cba06b3e82e72cbe4e4b58f6b4aa3e3c58a05f 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -40,8 +40,14 @@ enum SQL_NODE_TYPE { }; enum SQL_NODE_FROM_TYPE { - SQL_NODE_FROM_SUBQUERY = 1, - SQL_NODE_FROM_NAMELIST = 2, + SQL_NODE_FROM_SUBQUERY = 1, + SQL_NODE_FROM_TABLELIST = 2, +}; + +enum SQL_EXPR_FLAG { + EXPR_FLAG_TS_ERROR = 1, + EXPR_FLAG_US_TIMESTAMP = 2, + EXPR_FLAG_TIMESTAMP_VAR = 3, }; extern char tTokenTypeSwitcher[13]; @@ -83,11 +89,11 @@ typedef struct SSessionWindowVal { SStrToken gap; } SSessionWindowVal; -struct SFromInfo; +struct SRelationInfo; -typedef struct SQuerySqlNode { - struct SArray *pSelectList; // select clause - struct SFromInfo *from; // from clause SArray +typedef struct SSqlNode { + struct SArray *pSelNodeList; // select clause + struct SRelationInfo *from; // from clause SArray struct tSqlExpr *pWhere; // where clause [optional] SArray *pGroupby; // groupby clause, only for tags[optional], SArray SArray *pSortOrder; // orderby [optional], SArray @@ -99,25 +105,17 @@ typedef struct SQuerySqlNode { SLimitVal slimit; // group limit offset [optional] SStrToken sqlstr; // sql string in select clause struct tSqlExpr *pHaving; // having clause [optional] -} SQuerySqlNode; +} SSqlNode; typedef struct STableNamePair { SStrToken name; SStrToken aliasName; } STableNamePair; -typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause - SQuerySqlNode **pClause; - int32_t numOfClause; -} SSubclauseInfo; - -typedef struct SFromInfo { - int32_t type; // nested query|table name list - union { - SSubclauseInfo *pNode; - SArray *tableList; // SArray - }; -} SFromInfo; +typedef struct SRelationInfo { + int32_t type; // nested query|table name list + SArray *list; // SArray|SArray +} SRelationInfo; typedef struct SCreatedTableInfo { SStrToken name; // table name token @@ -140,7 +138,7 @@ typedef struct SCreateTableSql { } colInfo; SArray *childTableInfo; // SArray - SQuerySqlNode *pSelect; + SSqlNode *pSelect; } SCreateTableSql; typedef struct SAlterTableInfo { @@ -217,7 +215,7 @@ typedef struct SMiscInfo { typedef struct SSqlInfo { int32_t type; bool valid; - SSubclauseInfo subclauseInfo; + SArray *list; // todo refactor char msg[256]; union { SCreateTableSql *pCreateTableInfo; @@ -237,7 +235,8 @@ typedef struct tSqlExpr { SStrToken colInfo; // table column info tVariant value; // the use input value SStrToken token; // original sql expr string - + uint32_t flags; + struct tSqlExpr *pLeft; // left child struct tSqlExpr *pRight; // right child struct SArray *pParam; // function parameters list @@ -254,14 +253,9 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); -tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType); - -int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right); - -tSqlExpr *tSqlExprClone(tSqlExpr *pSrc); -SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); -SFromInfo 
*setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode); -void *destroyFromInfo(SFromInfo* pFromInfo); +SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); +SRelationInfo *setSubquery(SRelationInfo* pFromInfo, SArray* pSqlNode); +void *destroyRelationInfo(SRelationInfo* pFromInfo); // sql expr leaf node tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType); @@ -276,23 +270,23 @@ void tSqlExprDestroy(tSqlExpr *pExpr); SArray *tSqlExprListAppend(SArray *pList, tSqlExpr *pNode, SStrToken *pDistinct, SStrToken *pToken); void tSqlExprListDestroy(SArray *pList); -SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, +SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving); +int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right); -SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type); +SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type); SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable); SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); -void destroyAllSelectClause(SSubclauseInfo *pSql); -void destroyQuerySqlNode(SQuerySqlNode *pSql); +void destroyAllSqlNode(SArray *pSqlNode); +void destroySqlNode(SSqlNode *pSql); void freeCreateTableInfo(void* p); -SSqlInfo *setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type); -SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo); - -SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause); +SSqlInfo *setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type); +SArray *setSubclause(SArray *pList, void *pSqlNode); +SArray *appendSelectClause(SArray *pList, void *pSubclause); void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index cb8c9679ec9e08be7fdb9f24c2242b3bc93621de..3ca6d967463ff86de82112bb86307715c73715cd 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -28,9 +28,9 @@ #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1) +#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? 
(_q)->pExpr1[1].base.param[0].i64:1) -int32_t getOutputInterResultBufSize(SQuery* pQuery); +int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr); size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv); int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size, int16_t type); @@ -52,20 +52,11 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int return pResultRowInfo->pResult[slot]; } -static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset, - int16_t offset, int32_t size) { - assert(rowOffset >= 0 && pRuntimeEnv != NULL); +static FORCE_INLINE char *getPosInResultPage(SQueryAttr *pQueryAttr, tFilePage* page, int32_t rowOffset, int16_t offset) { + assert(rowOffset >= 0 && pQueryAttr != NULL); - SQuery* pQuery = pRuntimeEnv->pQuery; - int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize; - - int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery); - - // buffer overflow check - int64_t bufEnd = (rowOffset + offset * numOfRows + size); - assert(page->num <= pageSize && bufEnd <= page->num); - - return ((char*)page->data) + rowOffset + offset * numOfRows; + int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); + return ((char *)page->data) + rowOffset + offset * numOfRows; } bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index f9a4f1b51d34b17fddeb07946361f328d4702a1d..fd922240c2d452f125ce07c77a7faef84f2a31be 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -450,16 +450,16 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { } //////////////////////// The SELECT statement ///////////////////////////////// -%type select {SQuerySqlNode*} -%destructor select {destroyQuerySqlNode($$);} +%type select {SSqlNode*} +%destructor select {destroySqlNode($$);} select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N); } select(A) ::= LP select(B) RP. {A = B;} -%type union {SSubclauseInfo*} -%destructor union {destroyAllSelectClause($$);} +%type union {SArray*} +%destructor union {destroyAllSqlNode($$);} union(Y) ::= select(X). { Y = setSubclause(NULL, X); } union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); } @@ -505,35 +505,30 @@ distinct(X) ::= DISTINCT(Y). { X = Y; } distinct(X) ::= . { X.n = 0;} // A complete FROM clause. -%type from {SFromInfo*} +%type from {SRelationInfo*} +%destructor from {destroyRelationInfo($$);} from(A) ::= FROM tablelist(X). {A = X;} -from(A) ::= FROM LP union(Y) RP. {A = Y;} +from(A) ::= FROM LP union(Y) RP. {A = setSubquery(NULL, Y);} -%type tablelist {SArray*} +%type tablelist {SRelationInfo*} +%destructor tablelist {destroyRelationInfo($$);} tablelist(A) ::= ids(X) cpxName(Y). { - toTSDBType(X.type); X.n += Y.n; A = setTableNameList(NULL, &X, NULL); } tablelist(A) ::= ids(X) cpxName(Y) ids(Z). { - toTSDBType(X.type); - toTSDBType(Z.type); X.n += Y.n; A = setTableNameList(NULL, &X, &Z); } tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { - toTSDBType(X.type); X.n += Z.n; A = setTableNameList(Y, &X, NULL); } tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). 
{ - toTSDBType(X.type); - toTSDBType(F.type); X.n += Z.n; - A = setTableNameList(Y, &X, &F); } @@ -674,6 +669,8 @@ expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCr expr(A) ::= STRING(X). { A = tSqlExprCreateIdValue(&X, TK_STRING);} expr(A) ::= NOW(X). { A = tSqlExprCreateIdValue(&X, TK_NOW); } expr(A) ::= VARIABLE(X). { A = tSqlExprCreateIdValue(&X, TK_VARIABLE);} +expr(A) ::= PLUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);} +expr(A) ::= MINUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);} expr(A) ::= BOOL(X). { A = tSqlExprCreateIdValue(&X, TK_BOOL);} expr(A) ::= NULL(X). { A = tSqlExprCreateIdValue(&X, TK_NULL);} diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index e47545da95caaf10865c2ccdd93de1113b62d40c..3b1ffa46d9173f88268c0bac58e4fac7cfe5edfb 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -375,12 +375,6 @@ int32_t isValidFunction(const char* name, int32_t len) { return -1; } -// set the query flag to denote that query is completed -static void no_next_step(SQLFunctionCtx *pCtx) { - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->complete = true; -} - static bool function_setup(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->initialized) { @@ -1540,7 +1534,7 @@ static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { } } -static void stddev_next_step(SQLFunctionCtx *pCtx) { +static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) { /* * the stddevInfo and the average info struct share the same buffer area * And the position of each element in their struct is exactly the same matched @@ -2482,6 +2476,29 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { } } + +/* + * keep the intermediate results during scan data blocks in the format of: + * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ + * |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------| + * +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+ + */ +static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { + char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); + pTopBotInfo->res = (tValuePair**) tmp; + tmp += POINTER_BYTES * pCtx->param[0].i64; + + size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; +// assert(pCtx->param[0].i64 > 0); + + for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { + pTopBotInfo->res[i] = (tValuePair*) tmp; + pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); + tmp += size; + } +} + + bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo == NULL) { @@ -2495,6 +2512,10 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha return true; } + if ((void *)pTopBotInfo->res[0] != (void *)((char *)pTopBotInfo + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pTopBotInfo, pCtx); + } + tValuePair **pRes = (tValuePair**) pTopBotInfo->res; if (pCtx->functionId == TSDB_FUNC_TOP) { @@ -2534,27 +2555,6 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha } } -/* - * keep the intermediate results during scan data blocks in the format of: - * 
+-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ - * |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------| - * +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+ - */ -static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { - char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); - pTopBotInfo->res = (tValuePair**) tmp; - tmp += POINTER_BYTES * pCtx->param[0].i64; - - size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; -// assert(pCtx->param[0].i64 > 0); - - for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { - pTopBotInfo->res[i] = (tValuePair*) tmp; - pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); - tmp += size; - } -} - static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) { if (!function_setup(pCtx)) { return false; @@ -2609,6 +2609,10 @@ static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) { STopBotInfo *pRes = getTopBotOutputInfo(pCtx); assert(pRes->num >= 0); + + if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pRes, pCtx); + } SET_VAL(pCtx, 1, 1); TSKEY ts = GET_TS_DATA(pCtx, index); @@ -2911,7 +2915,7 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } -static void percentile_next_step(SQLFunctionCtx *pCtx) { +static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) { SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); @@ -3016,7 +3020,7 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo)); pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo)); - + if (pInput->pHisto->numOfElems <= 0) { return; } @@ -3035,7 +3039,7 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); tHistogramDestroy(&pRes); } - + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; SET_VAL(pCtx, 1, 1); @@ -3046,7 +3050,7 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); SAPercentileInfo *pOutput = GET_ROWCELL_INTERBUF(pResInfo); - + if (pCtx->currentStage == MERGE_STAGE) { if (pResInfo->hasResult == DATA_SET_FLAG) { // check for null assert(pOutput->pHisto->numOfElems > 0); @@ -3322,8 +3326,6 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); char *pData = GET_INPUT_DATA(pCtx, index); memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - - pCtx->pOutput += pCtx->inputBytes; } /** @@ -3361,9 +3363,15 @@ static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { * @param pCtx * @return */ +static void copy_function(SQLFunctionCtx *pCtx); + static void tag_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, 1, 1); - tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); + if (pCtx->currentStage == MERGE_STAGE) { + copy_function(pCtx); + } else { + tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); + } } static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -3700,7 +3708,7 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += pCtx->size; SArithmeticSupport *sas = 
(SArithmeticSupport *)pCtx->param[1].pz; - arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); + arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); } static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -3708,7 +3716,7 @@ static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; sas->offset = index; - arithmeticTreeTraverse(sas->pArithExpr->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData); + arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData); pCtx->pOutput += pCtx->outputBytes; } @@ -4897,7 +4905,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, count_function, count_function_f, - no_next_step, doFinalizer, count_func_merge, countRequired, @@ -4911,7 +4918,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, sum_function, sum_function_f, - no_next_step, function_finalizer, sum_func_merge, statisRequired, @@ -4925,7 +4931,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, avg_function, avg_function_f, - no_next_step, avg_finalizer, avg_func_merge, statisRequired, @@ -4939,7 +4944,6 @@ SAggFunctionInfo aAggs[] = {{ min_func_setup, min_function, min_function_f, - no_next_step, function_finalizer, min_func_merge, statisRequired, @@ -4953,7 +4957,6 @@ SAggFunctionInfo aAggs[] = {{ max_func_setup, max_function, max_function_f, - no_next_step, function_finalizer, max_func_merge, statisRequired, @@ -4967,7 +4970,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, stddev_function, stddev_function_f, - stddev_next_step, stddev_finalizer, noop1, dataBlockRequired, @@ -4981,7 +4983,6 @@ SAggFunctionInfo aAggs[] = {{ percentile_function_setup, percentile_function, percentile_function_f, - percentile_next_step, percentile_finalizer, noop1, dataBlockRequired, @@ -4995,7 +4996,6 @@ SAggFunctionInfo aAggs[] = {{ apercentile_function_setup, apercentile_function, apercentile_function_f, - no_next_step, apercentile_finalizer, apercentile_func_merge, dataBlockRequired, @@ -5009,7 +5009,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, first_function, first_function_f, - no_next_step, function_finalizer, noop1, firstFuncRequired, @@ -5023,7 +5022,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, last_function, last_function_f, - no_next_step, function_finalizer, noop1, lastFuncRequired, @@ -5038,7 +5036,6 @@ SAggFunctionInfo aAggs[] = {{ first_last_function_setup, last_row_function, noop2, - no_next_step, last_row_finalizer, last_dist_func_merge, dataBlockRequired, @@ -5053,7 +5050,6 @@ SAggFunctionInfo aAggs[] = {{ top_bottom_function_setup, top_function, top_function_f, - no_next_step, top_bottom_func_finalizer, top_func_merge, dataBlockRequired, @@ -5068,7 +5064,6 @@ SAggFunctionInfo aAggs[] = {{ top_bottom_function_setup, bottom_function, bottom_function_f, - no_next_step, top_bottom_func_finalizer, bottom_func_merge, dataBlockRequired, @@ -5082,7 +5077,6 @@ SAggFunctionInfo aAggs[] = {{ spread_function_setup, spread_function, spread_function_f, - no_next_step, spread_function_finalizer, spread_func_merge, countRequired, @@ -5096,7 +5090,6 @@ SAggFunctionInfo aAggs[] = {{ twa_function_setup, twa_function, twa_function_f, - no_next_step, twa_function_finalizer, twa_function_copy, dataBlockRequired, @@ -5110,7 +5103,6 @@ SAggFunctionInfo aAggs[] = {{ leastsquares_function_setup, leastsquares_function, 
leastsquares_function_f, - no_next_step, leastsquares_finalizer, noop1, dataBlockRequired, @@ -5124,35 +5116,32 @@ SAggFunctionInfo aAggs[] = {{ function_setup, date_col_output_function, date_col_output_function_f, - no_next_step, doFinalizer, copy_function, noDataRequired, }, { // 17 - "ts", + "ts_dummy", TSDB_FUNC_TS_DUMMY, TSDB_FUNC_TS_DUMMY, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, noop1, noop2, - no_next_step, doFinalizer, copy_function, dataBlockRequired, }, { // 18 - "tag", + "tag_dummy", TSDB_FUNC_TAG_DUMMY, TSDB_FUNC_TAG_DUMMY, TSDB_BASE_FUNC_SO, function_setup, tag_function, noop2, - no_next_step, doFinalizer, copy_function, noDataRequired, @@ -5166,7 +5155,6 @@ SAggFunctionInfo aAggs[] = {{ ts_comp_function_setup, ts_comp_function, ts_comp_function_f, - no_next_step, ts_comp_finalize, copy_function, dataBlockRequired, @@ -5180,7 +5168,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, tag_function, tag_function_f, - no_next_step, doFinalizer, copy_function, noDataRequired, @@ -5194,7 +5181,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, col_project_function, col_project_function_f, - no_next_step, doFinalizer, copy_function, dataBlockRequired, @@ -5208,7 +5194,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, tag_project_function, tag_project_function_f, - no_next_step, doFinalizer, copy_function, noDataRequired, @@ -5222,7 +5207,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, arithmetic_function, arithmetic_function_f, - no_next_step, doFinalizer, copy_function, dataBlockRequired, @@ -5236,7 +5220,6 @@ SAggFunctionInfo aAggs[] = {{ diff_function_setup, diff_function, diff_function_f, - no_next_step, doFinalizer, noop1, dataBlockRequired, @@ -5251,7 +5234,6 @@ SAggFunctionInfo aAggs[] = {{ first_last_function_setup, first_dist_function, first_dist_function_f, - no_next_step, function_finalizer, first_dist_func_merge, firstDistFuncRequired, @@ -5265,7 +5247,6 @@ SAggFunctionInfo aAggs[] = {{ first_last_function_setup, last_dist_function, last_dist_function_f, - no_next_step, function_finalizer, last_dist_func_merge, lastDistFuncRequired, @@ -5279,7 +5260,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, stddev_dst_function, stddev_dst_function_f, - no_next_step, stddev_dst_finalizer, stddev_dst_merge, dataBlockRequired, @@ -5293,7 +5273,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, interp_function, do_sum_f, // todo filter handle - no_next_step, doFinalizer, copy_function, dataBlockRequired, @@ -5307,7 +5286,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, rate_function, rate_function_f, - no_next_step, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5321,7 +5299,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, irate_function, irate_function_f, - no_next_step, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5335,7 +5312,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, rate_function, rate_function_f, - no_next_step, sumrate_finalizer, sumrate_func_merge, dataBlockRequired, @@ -5349,7 +5325,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, irate_function, irate_function_f, - no_next_step, sumrate_finalizer, sumrate_func_merge, dataBlockRequired, @@ -5363,7 +5338,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, rate_function, rate_function_f, - no_next_step, sumrate_finalizer, sumrate_func_merge, dataBlockRequired, @@ -5377,7 +5351,6 @@ SAggFunctionInfo aAggs[] = {{ rate_function_setup, irate_function, irate_function_f, - no_next_step, sumrate_finalizer, sumrate_func_merge, dataBlockRequired, @@ -5391,7 
+5364,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, noop1, noop2, - no_next_step, noop1, noop1, dataBlockRequired, @@ -5405,7 +5377,6 @@ SAggFunctionInfo aAggs[] = {{ function_setup, blockInfo_func, noop2, - no_next_step, blockinfo_func_finalizer, block_func_merge, dataBlockRequired, diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dc524d95a957f3de13a45315c819b9ae0fe9a4d6..818e7c6b039ffd42c8a8dca32f365f4f8590b508 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -35,13 +35,6 @@ #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) -#define CHECK_IF_QUERY_KILLED(_q) \ - do { \ - if (isQueryKilled((_q)->qinfo)) { \ - longjmp((_q)->env, TSDB_CODE_TSC_QUERY_CANCELLED); \ - } \ - } while (0) - #define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} #define TIME_WINDOW_COPY(_dst, _src) do {\ @@ -98,28 +91,26 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) - uint64_t queryHandleId = 0; int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } - int64_t genQueryId(void) { int64_t uid = 0; int64_t did = tsDnodeId; - + uid = did << 54; - + int64_t pid = ((int64_t)taosGetPId()) & 0x3FF; uid |= pid << 44; - + int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF; uid |= ts << 11; - + int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF; uid |= sid; @@ -127,21 +118,19 @@ int64_t genQueryId(void) { return uid; } - - -static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') { - tw->skey += pQuery->interval.sliding * factor; - tw->ekey = tw->skey + pQuery->interval.interval - 1; +static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) { + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); + if (pQueryAttr->interval.intervalUnit != 'n' && pQueryAttr->interval.intervalUnit != 'y') { + tw->skey += pQueryAttr->interval.sliding * factor; + tw->ekey = tw->skey + pQueryAttr->interval.interval - 1; return; } - int64_t key = tw->skey / 1000, interval = pQuery->interval.interval; - if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) { + int64_t key = tw->skey / 1000, interval = pQueryAttr->interval.interval; + if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) { key /= 1000; } - if (pQuery->interval.intervalUnit == 'y') { + if (pQueryAttr->interval.intervalUnit == 'y') { interval *= 12; } @@ -159,7 +148,7 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { tm.tm_mon = mon % 12; tw->ekey = mktime(&tm) * 1000L; - if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) { + if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) { tw->skey *= 1000L; tw->ekey *= 1000L; } @@ -167,8 +156,8 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { } static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes); -static void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, - int32_t numOfCols, int32_t* rowCellInfoOffset); +static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResult, SQLFunctionCtx* pCtx, + int32_t numOfCols, int32_t* rowCellInfoOffset); void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow 
*pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset); static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); @@ -176,36 +165,20 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex); static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo); -static bool hasMainOutput(SQuery *pQuery); +static bool hasMainOutput(SQueryAttr *pQueryAttr); static int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, STableQueryInfo *pTableQueryInfo); static void releaseQueryBuf(size_t numOfTables); static int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order); -static STsdbQueryCond createTsdbQueryCond(SQuery* pQuery, STimeWindow* win); +static STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* win); static STableIdInfo createTableIdInfo(STableQueryInfo* pTableQueryInfo); static void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInfo* pDownstream); +static int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, + SSingleColumnFilterInfo** pFilterInfo, uint64_t qId); +static void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols); -static int32_t getNumOfScanTimes(SQuery* pQuery); -static bool isFixedOutputQuery(SQuery* pQuery); - -static SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); -static SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime); -static SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); - -static SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream); -static SOperatorInfo* createOffsetOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream); -static SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput); -static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); -static SOperatorInfo* 
createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr); static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); @@ -220,9 +193,8 @@ static int32_t getGroupbyColumnIndex(SSqlGroupbyExpr *pGroupbyExpr, SSDataBlock* static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOperatorInfo *pInfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex); static void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size); -static void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win); -static bool isPointInterpoQuery(SQuery *pQuery); -static void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo); +static void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win); +static void setResultBufSize(SQueryAttr* pQueryAttr, SRspResultInfo* pResultInfo); static void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable); static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr); static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes); @@ -231,7 +203,7 @@ static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowIn int32_t groupIndex); // setup the output buffer for each operator -static SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) { +SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) { const static int32_t minSize = 8; SSDataBlock *res = calloc(1, sizeof(SSDataBlock)); @@ -240,18 +212,19 @@ static SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32 res->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); for (int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData idata = {{0}}; - idata.info.type = pExpr[i].type; - idata.info.bytes = pExpr[i].bytes; + idata.info.type = pExpr[i].base.resType; + idata.info.bytes = pExpr[i].base.resBytes; idata.info.colId = pExpr[i].base.resColId; - idata.pData = calloc(1, MAX(idata.info.bytes * numOfRows, minSize)); // at least to hold a pointer on x64 platform + int32_t size = MAX(idata.info.bytes * numOfRows, minSize); + idata.pData = calloc(1, size); // at least to hold a pointer on x64 platform taosArrayPush(res->pDataBlock, &idata); } return res; } -static void* destroyOutputBuf(SSDataBlock* pBlock) { +void* destroyOutputBuf(SSDataBlock* pBlock) { if (pBlock == NULL) { return NULL; } @@ -269,8 +242,8 @@ static void* destroyOutputBuf(SSDataBlock* pBlock) { } int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput) { - SQuery *pQuery = pRuntimeEnv->pQuery; - bool hasMainFunction = hasMainOutput(pQuery); + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; + bool hasMainFunction = hasMainOutput(pQueryAttr); int32_t maxOutput = 0; for (int32_t j = 0; j < numOfOutput; ++j) { @@ -301,37 +274,6 @@ static void clearNumOfRes(SQLFunctionCtx* pCtx, int32_t numOfOutput) { } } -static bool isGroupbyColumn(SSqlGroupbyExpr *pGroupbyExpr) { - if (pGroupbyExpr == NULL || pGroupbyExpr->numOfGroupCols == 0) { - return 
false; - } - - for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) { - SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i); - if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) { - //make sure the normal column locates at the second position if tbname exists in group by clause - if (pGroupbyExpr->numOfGroupCols > 1) { - assert(pColIndex->colIndex > 0); - } - - return true; - } - } - - return false; -} - -static bool isStabledev(SQuery* pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pExpr1[i].base.functionId; - if (functId == TSDB_FUNC_STDDEV_DST) { - return true; - } - } - - return false; -} - static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) { bool hasTags = false; int32_t numOfSelectivity = 0; @@ -351,9 +293,9 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput return (numOfSelectivity > 0 && hasTags); } -static bool isProjQuery(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pExpr1[i].base.functionId; +static bool isProjQuery(SQueryAttr *pQueryAttr) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functId = pQueryAttr->pExpr1[i].base.functionId; if (functId != TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) { return false; } @@ -362,52 +304,6 @@ static bool isProjQuery(SQuery *pQuery) { return true; } -static bool isTsCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; } - -static bool isTopBottomQuery(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; - if (functionId == TSDB_FUNC_TS) { - continue; - } - - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { - return true; - } - } - - return false; -} - -static bool timeWindowInterpoRequired(SQuery *pQuery) { - for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; - if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP) { - return true; - } - } - - return false; -} - -static bool hasTagValOutput(SQuery* pQuery) { - SExprInfo *pExprInfo = &pQuery->pExpr1[0]; - if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { - return true; - } else { // set tag value, by which the results are aggregated. - for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { - SExprInfo *pLocalExprInfo = &pQuery->pExpr1[idx]; - - // ts_comp column required the tag value for join filter - if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { - return true; - } - } - } - - return false; -} - static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) { if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { return false; @@ -455,7 +351,7 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes (SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); // in case of repeat scan/reverse scan, no new time window added. - if (QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQueryAttr)) { if (!masterscan) { // the *p1 may be NULL in case of sliding+offset exists. return (p1 != NULL)? 
*p1:NULL; } @@ -479,7 +375,6 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes prepareResultListBuffer(pResultRowInfo, pRuntimeEnv); SResultRow *pResult = NULL; - if (p1 == NULL) { pResult = getNewResultRow(pRuntimeEnv->pool); int32_t ret = initResultRow(pResult); @@ -505,19 +400,19 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes return getResultRow(pResultRowInfo, pResultRowInfo->curIndex); } -static void getInitialStartTimeWindow(SQuery* pQuery, TSKEY ts, STimeWindow* w) { - if (QUERY_IS_ASC_QUERY(pQuery)) { - getAlignQueryTimeWindow(pQuery, ts, ts, pQuery->window.ekey, w); +static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + getAlignQueryTimeWindow(pQueryAttr, ts, ts, pQueryAttr->window.ekey, w); } else { // the start position of the first time window in the endpoint that spreads beyond the queried last timestamp - getAlignQueryTimeWindow(pQuery, ts, pQuery->window.ekey, ts, w); + getAlignQueryTimeWindow(pQueryAttr, ts, pQueryAttr->window.ekey, ts, w); int64_t key = w->skey; while(key < ts) { // moving towards end - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - key = taosTimeAdd(key, pQuery->interval.sliding, pQuery->interval.slidingUnit, pQuery->precision); + if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + key = taosTimeAdd(key, pQueryAttr->interval.sliding, pQueryAttr->interval.slidingUnit, pQueryAttr->precision); } else { - key += pQuery->interval.sliding; + key += pQueryAttr->interval.sliding; } if (key >= ts) { @@ -530,21 +425,21 @@ static void getInitialStartTimeWindow(SQuery* pQuery, TSKEY ts, STimeWindow* w) } // get the correct time window according to the handled timestamp -static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQuery *pQuery) { +static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) { STimeWindow w = {0}; if (pResultRowInfo->curIndex == -1) { // the first window, from the previous stored value if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { - getInitialStartTimeWindow(pQuery, ts, &w); + getInitialStartTimeWindow(pQueryAttr, ts, &w); pResultRowInfo->prevSKey = w.skey; } else { w.skey = pResultRowInfo->prevSKey; } - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; } else { - w.ekey = w.skey + pQuery->interval.interval - 1; + w.ekey = w.skey + pQueryAttr->interval.interval - 1; } } else { int32_t slot = curTimeWindowIndex(pResultRowInfo); @@ -553,23 +448,23 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t } if (w.skey > ts || w.ekey < ts) { - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - w.skey = taosTimeTruncate(ts, &pQuery->interval, pQuery->precision); - w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + w.skey = taosTimeTruncate(ts, &pQueryAttr->interval, 
pQueryAttr->precision); + w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; } else { int64_t st = w.skey; if (st > ts) { - st -= ((st - ts + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding; + st -= ((st - ts + pQueryAttr->interval.sliding - 1) / pQueryAttr->interval.sliding) * pQueryAttr->interval.sliding; } - int64_t et = st + pQuery->interval.interval - 1; + int64_t et = st + pQueryAttr->interval.interval - 1; if (et < ts) { - st += ((ts - et + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding; + st += ((ts - et + pQueryAttr->interval.sliding - 1) / pQueryAttr->interval.sliding) * pQueryAttr->interval.sliding; } w.skey = st; - w.ekey = w.skey + pQuery->interval.interval - 1; + w.ekey = w.skey + pQueryAttr->interval.interval - 1; } } @@ -577,8 +472,8 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t * query border check, skey should not be bounded by the query time range, since the value skey will * be used as the time window index value. So we only change ekey of time window accordingly. */ - if (w.ekey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) { - w.ekey = pQuery->window.ekey; + if (w.ekey > pQueryAttr->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) { + w.ekey = pQueryAttr->window.ekey; } return w; @@ -643,7 +538,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRow // not assign result buffer yet, add new result buffer if (pResultRow->pageId == -1) { - int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) groupId, pRuntimeEnv->pQuery->intermediateResultRowSize); + int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) groupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize); if (ret != TSDB_CODE_SUCCESS) { return -1; } @@ -754,28 +649,28 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey, } } -static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQuery* pQuery, TSKEY lastKey) { - bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); - if ((lastKey > pQuery->window.ekey && ascQuery) || (lastKey < pQuery->window.ekey && (!ascQuery))) { +static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQueryAttr* pQueryAttr, TSKEY lastKey) { + bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); + if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) { closeAllResultRows(pResultRowInfo); pResultRowInfo->curIndex = pResultRowInfo->size - 1; } else { int32_t step = ascQuery ? 
1 : -1; - doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQuery->timeWindowInterpo); + doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo); } } -static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn, +static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn, int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, bool updateLastKey) { assert(startPos >= 0 && startPos < pDataBlockInfo->rows); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + STableQueryInfo* item = pRuntimeEnv->current; int32_t num = -1; - int32_t order = pQuery->order.order; + int32_t order = pQueryAttr->order.order; int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); - STableQueryInfo* item = pQuery->current; - - if (QUERY_IS_ASC_QUERY(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { if (ekey < pDataBlockInfo->window.ekey) { num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn); if (updateLastKey) { // update the last key @@ -807,7 +702,7 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; bool hasPrev = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < numOfOutput; ++k) { @@ -816,7 +711,7 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx char* start = pCtx[k].pInput; - int32_t pos = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1); + int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? 
offset : offset - (forwardStep - 1); if (pCtx[k].pInput != NULL) { pCtx[k].pInput = (char *)pCtx[k].pInput + pos * pCtx[k].inputBytes; } @@ -844,42 +739,42 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } -static int32_t getNextQualifiedWindow(SQuery* pQuery, STimeWindow *pNext, SDataBlockInfo *pDataBlockInfo, +static int32_t getNextQualifiedWindow(SQueryAttr* pQueryAttr, STimeWindow *pNext, SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys, __block_search_fn_t searchFn, int32_t prevPosition) { - getNextTimeWindow(pQuery, pNext); + getNextTimeWindow(pQueryAttr, pNext); // next time window is not in current block - if ((pNext->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (pNext->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + if ((pNext->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) || + (pNext->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQueryAttr))) { return -1; } TSKEY startKey = -1; - if (QUERY_IS_ASC_QUERY(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { startKey = pNext->skey; - if (startKey < pQuery->window.skey) { - startKey = pQuery->window.skey; + if (startKey < pQueryAttr->window.skey) { + startKey = pQueryAttr->window.skey; } } else { startKey = pNext->ekey; - if (startKey > pQuery->window.skey) { - startKey = pQuery->window.skey; + if (startKey > pQueryAttr->window.skey) { + startKey = pQueryAttr->window.skey; } } int32_t startPos = 0; // tumbling time window query, a special case of sliding time window query - if (pQuery->interval.sliding == pQuery->interval.interval && prevPosition != -1) { - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + if (pQueryAttr->interval.sliding == pQueryAttr->interval.interval && prevPosition != -1) { + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); startPos = prevPosition + factor; } else { - if (startKey <= pDataBlockInfo->window.skey && QUERY_IS_ASC_QUERY(pQuery)) { + if (startKey <= pDataBlockInfo->window.skey && QUERY_IS_ASC_QUERY(pQueryAttr)) { startPos = 0; - } else if (startKey >= pDataBlockInfo->window.ekey && !QUERY_IS_ASC_QUERY(pQuery)) { + } else if (startKey >= pDataBlockInfo->window.ekey && !QUERY_IS_ASC_QUERY(pQueryAttr)) { startPos = pDataBlockInfo->rows - 1; } else { - startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order); + startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQueryAttr->order.order); } } @@ -888,29 +783,29 @@ static int32_t getNextQualifiedWindow(SQuery* pQuery, STimeWindow *pNext, SDataB * this case may happen when the time window is too small */ if (primaryKeys == NULL) { - if (QUERY_IS_ASC_QUERY(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { assert(pDataBlockInfo->window.skey <= pNext->ekey); } else { assert(pDataBlockInfo->window.ekey >= pNext->skey); } } else { - if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) { + if (QUERY_IS_ASC_QUERY(pQueryAttr) && primaryKeys[startPos] > pNext->ekey) { TSKEY next = primaryKeys[startPos]; - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); - pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + pNext->skey = taosTimeTruncate(next, 
&pQueryAttr->interval, pQueryAttr->precision); + pNext->ekey = taosTimeAdd(pNext->skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; } else { - pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding; - pNext->skey = pNext->ekey - pQuery->interval.interval + 1; + pNext->ekey += ((next - pNext->ekey + pQueryAttr->interval.sliding - 1)/pQueryAttr->interval.sliding) * pQueryAttr->interval.sliding; + pNext->skey = pNext->ekey - pQueryAttr->interval.interval + 1; } - } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) { + } else if ((!QUERY_IS_ASC_QUERY(pQueryAttr)) && primaryKeys[startPos] < pNext->skey) { TSKEY next = primaryKeys[startPos]; - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); - pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + pNext->skey = taosTimeTruncate(next, &pQueryAttr->interval, pQueryAttr->precision); + pNext->ekey = taosTimeAdd(pNext->skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; } else { - pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding; - pNext->ekey = pNext->skey + pQuery->interval.interval - 1; + pNext->skey -= ((pNext->skey - next + pQueryAttr->interval.sliding - 1) / pQueryAttr->interval.sliding) * pQueryAttr->interval.sliding; + pNext->ekey = pNext->skey + pQueryAttr->interval.interval - 1; } } } @@ -918,17 +813,17 @@ static int32_t getNextQualifiedWindow(SQuery* pQuery, STimeWindow *pNext, SDataB return startPos; } -static FORCE_INLINE TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) { +static FORCE_INLINE TSKEY reviseWindowEkey(SQueryAttr *pQueryAttr, STimeWindow *pWindow) { TSKEY ekey = -1; - if (QUERY_IS_ASC_QUERY(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { ekey = pWindow->ekey; - if (ekey > pQuery->window.ekey) { - ekey = pQuery->window.ekey; + if (ekey > pQueryAttr->window.ekey) { + ekey = pQueryAttr->window.ekey; } } else { ekey = pWindow->skey; - if (ekey < pQuery->window.ekey) { - ekey = pQuery->window.ekey; + if (ekey < pQueryAttr->window.ekey) { + ekey = pQueryAttr->window.ekey; } } @@ -956,17 +851,17 @@ static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* return; } - SQuery* pQuery = pRuntimeEnv->pQuery; - for (int32_t k = 0; k < pQuery->numOfCols; ++k) { + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + for (int32_t k = 0; k < pQueryAttr->numOfCols; ++k) { SColumnInfoData *pColInfo = taosArrayGet(pDataBlock, k); memcpy(pRuntimeEnv->prevRow[k], ((char*)pColInfo->pData) + (pColInfo->info.bytes * rowIndex), pColInfo->info.bytes); } } -static TSKEY getStartTsKey(SQuery* pQuery, STimeWindow* win, const TSKEY* tsCols, int32_t rows) { +static TSKEY getStartTsKey(SQueryAttr* pQueryAttr, STimeWindow* win, const TSKEY* tsCols, int32_t rows) { TSKEY ts = TSKEY_INITIAL_VAL; - bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); if (tsCols == NULL) { ts = ascQuery? 
win->skey : win->ekey; } else { @@ -979,7 +874,7 @@ static TSKEY getStartTsKey(SQuery* pQuery, STimeWindow* win, const TSKEY* tsCols static void setArithParams(SArithmeticSupport* sas, SExprInfo *pExprInfo, SSDataBlock* pSDataBlock) { sas->numOfCols = (int32_t) pSDataBlock->info.numOfCols; - sas->pArithExpr = pExprInfo; + sas->pExprInfo = pExprInfo; sas->colList = calloc(1, pSDataBlock->info.numOfCols*sizeof(SColumnInfo)); for(int32_t i = 0; i < sas->numOfCols; ++i) { @@ -1007,7 +902,7 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SQLFunctionCtx* pC } } -static void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order) { +void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order) { if (pCtx[0].functionId == TSDB_FUNC_ARITHM) { SArithmeticSupport* pSupport = (SArithmeticSupport*) pCtx[0].param[1].pz; if (pSupport->colList == NULL) { @@ -1036,19 +931,30 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, setArithParams((SArithmeticSupport*)pCtx[i].param[1].pz, &pOperator->pExpr[i], pBlock); } else { SColIndex* pCol = &pOperator->pExpr[i].base.colInfo; - if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || pCol->colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { + if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || (pCol->colId == TSDB_BLOCK_DIST_COLUMN_INDEX) || + (TSDB_COL_IS_TAG(pCol->flag) && pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) { SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo; SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, pColIndex->colIndex); // in case of the block distribution query, the inputBytes is not a constant value. pCtx[i].pInput = p->pData; - assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type);// && pCtx[i].inputBytes == p->info.bytes); + assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type); uint32_t status = aAggs[pCtx[i].functionId].status; if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) { SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); pCtx[i].ptsList = (int64_t*) tsInfo->pData; } + } else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) { + SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo; + SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, pColIndex->colIndex); + + pCtx[i].pInput = p->pData; + assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type); + for(int32_t j = 0; j < pBlock->info.rows; ++j) { + char* dst = p->pData + j * p->info.bytes; + tVariantDump(&pOperator->pExpr[i].base.param[1], dst, p->info.type, true); + } } } } @@ -1067,10 +973,16 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction } static void arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; for (int32_t k = 0; k < numOfOutput; ++k) { - pCtx[k].startTs = pQuery->window.skey; + pCtx[k].startTs = pQueryAttr->window.skey; + + // Always set the asc order for merge stage process + if (pCtx[k].currentStage == MERGE_STAGE) { + pCtx[k].order = TSDB_ORDER_ASC; + } + aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); } } @@ -1133,9 +1045,9 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SQLFunctionCtx* pCtx, int32_t pos, 
int32_t numOfRows, SArray* pDataBlock, const TSKEY* tsCols, STimeWindow* win) { SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); TSKEY curTs = tsCols[pos]; TSKEY lastTs = *(TSKEY *) pRuntimeEnv->prevRow[0]; @@ -1153,7 +1065,7 @@ static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SQLF return true; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); TSKEY prevTs = ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery))? lastTs:tsCols[pos - step]; doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, prevTs, pos - step, curTs, pos, @@ -1164,15 +1076,15 @@ static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SQLF static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SQLFunctionCtx* pCtx, int32_t endRowIndex, SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, STimeWindow* win) { SQueryRuntimeEnv *pRuntimeEnv = pOperatorInfo->pRuntimeEnv; - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t numOfOutput = pOperatorInfo->numOfOutput; TSKEY actualEndKey = tsCols[endRowIndex]; - TSKEY key = QUERY_IS_ASC_QUERY(pQuery)? win->ekey:win->skey; + TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? win->ekey:win->skey; // not ended in current data block, do not invoke interpolation - if ((key > blockEkey && QUERY_IS_ASC_QUERY(pQuery)) || (key < blockEkey && !QUERY_IS_ASC_QUERY(pQuery))) { + if ((key > blockEkey && QUERY_IS_ASC_QUERY(pQueryAttr)) || (key < blockEkey && !QUERY_IS_ASC_QUERY(pQueryAttr))) { setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP); return false; } @@ -1183,7 +1095,7 @@ static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SQLFun return true; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); int32_t nextRowIndex = endRowIndex + step; assert(nextRowIndex >= 0); @@ -1196,13 +1108,13 @@ static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SQLFun static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlock, SQLFunctionCtx* pCtx, SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardStep) { SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; - SQuery* pQuery = pRuntimeEnv->pQuery; - if (!pQuery->timeWindowInterpo) { + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + if (!pQueryAttr->timeWindowInterpo) { return; } assert(pBlock != NULL); - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, 0); @@ -1216,11 +1128,11 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); + setNotInterpoWindowKey(pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); } // point interpolation does not require the end key time window interpolation. 
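/*
 * Illustrative sketch, not part of this patch: window-border interpolation in
 * doWindowBorderInterpolation() estimates a column value exactly at the window
 * boundary from the two rows that straddle it. Assuming plain linear
 * interpolation on a numeric column (helper name and signature below are
 * hypothetical):
 *
 *   double interpAtBoundary(TSKEY prevTs, double prevVal,
 *                           TSKEY curTs,  double curVal, TSKEY boundaryTs) {
 *     if (curTs == prevTs) {
 *       return curVal;                       // avoid division by zero
 *     }
 *     double r = (double)(boundaryTs - prevTs) / (double)(curTs - prevTs);
 *     return prevVal + (curVal - prevVal) * r;
 *   }
 */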
- if (isPointInterpoQuery(pQuery)) { + if (pQueryAttr->pointInterpQuery) { return; } @@ -1229,13 +1141,13 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc if (!done) { int32_t endRowIndex = startPos + (forwardStep - 1) * step; - TSKEY endKey = QUERY_IS_ASC_QUERY(pQuery)? pBlock->info.window.ekey:pBlock->info.window.skey; + TSKEY endKey = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey:pBlock->info.window.skey; bool interp = setTimeWindowInterpolationEndTs(pOperatorInfo, pCtx, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pCtx, pQueryAttr->numOfOutput, RESULT_ROW_END_INTERP); } } @@ -1244,10 +1156,10 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; int32_t numOfOutput = pOperatorInfo->numOfOutput; - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); + bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); int32_t prevIndex = curTimeWindowIndex(pResultRowInfo); @@ -1260,9 +1172,9 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); - TSKEY ts = getStartTsKey(pQuery, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); + TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); - STimeWindow win = getActiveTimeWindow(pResultRowInfo, ts, pQuery); + STimeWindow win = getActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SResultRow* pResult = NULL; @@ -1273,13 +1185,13 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } int32_t forwardStep = 0; - TSKEY ekey = reviseWindowEkey(pQuery, &win); + TSKEY ekey = reviseWindowEkey(pQueryAttr, &win); forwardStep = - getNumOfRowsInTimeWindow(pQuery, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); + getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); // prev time window not interpolation yet. int32_t curIndex = curTimeWindowIndex(pResultRowInfo); - if (prevIndex != -1 && prevIndex < curIndex && pQuery->timeWindowInterpo) { + if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) { for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already. 
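/*
 * Illustrative sketch, not part of this patch: for a fixed-length interval the
 * next window is obtained by shifting the start by one sliding step, and a
 * tumbling window is simply the special case where sliding == interval, so
 * consecutive windows never overlap. The non-'n'/'y' branch above reduces to
 * (hypothetical helper name):
 *
 *   void slideWindow(STimeWindow* w, int64_t interval, int64_t sliding) {
 *     w->skey += sliding;                 // advance start by one sliding step
 *     w->ekey  = w->skey + interval - 1;  // closed end of the new window
 *   }
 */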
SResultRow* pRes = pResultRowInfo->pResult[j]; if (pRes->closed) { @@ -1301,7 +1213,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul -1, tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); + setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); } @@ -1321,7 +1233,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul STimeWindow nextWin = win; while (1) { int32_t prevEndPos = (forwardStep - 1) * step + startPos; - startPos = getNextQualifiedWindow(pQuery, &nextWin, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); + startPos = getNextQualifiedWindow(pQueryAttr, &nextWin, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); if (startPos < 0) { break; } @@ -1333,33 +1245,34 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - ekey = reviseWindowEkey(pQuery, &nextWin); - forwardStep = getNumOfRowsInTimeWindow(pQuery, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); + ekey = reviseWindowEkey(pQueryAttr, &nextWin); + forwardStep = getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &nextWin, startPos, forwardStep); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &nextWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } - if (pQuery->timeWindowInterpo) { + if (pQueryAttr->timeWindowInterpo) { int32_t rowIndex = ascQuery? 
(pSDataBlock->info.rows-1):0; saveDataBlockLastRow(pRuntimeEnv, &pSDataBlock->info, pSDataBlock->pDataBlock, rowIndex); } - updateResultRowInfoActiveIndex(pResultRowInfo, pQuery, pQuery->current->lastKey); + updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey); } static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; - STableQueryInfo* item = pRuntimeEnv->pQuery->current; + STableQueryInfo* item = pRuntimeEnv->current; SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex); - int16_t bytes = pColInfoData->info.bytes; - int16_t type = pColInfoData->info.type; - SQuery *pQuery = pRuntimeEnv->pQuery; + + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int16_t bytes = pColInfoData->info.bytes; + int16_t type = pColInfoData->info.type; if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { - qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv)); + qError("QInfo:0x%"PRIx64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv)); return; } @@ -1377,7 +1290,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn memcpy(pInfo->prevData, val, bytes); - if (pQuery->stableQuery && pQuery->stabledev && (pRuntimeEnv->prevResult != NULL)) { + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); } @@ -1400,7 +1313,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; - STableQueryInfo* item = pRuntimeEnv->pQuery->current; + STableQueryInfo* item = pRuntimeEnv->current; // primary timestamp column SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0); @@ -1408,7 +1321,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SOptrBasicInfo* pBInfo = &pInfo->binfo; - int64_t gap = pOperator->pRuntimeEnv->pQuery->sw.gap; + int64_t gap = pOperator->pRuntimeEnv->pQueryAttr->sw.gap; pInfo->numOfRows = 0; TSKEY* tsList = (TSKEY*)pColInfoData->pData; @@ -1494,7 +1407,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOp setResultRowKey(pResultRow, pData, type); if (pResultRow->pageId == -1) { - int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, groupIndex, pRuntimeEnv->pQuery->resultRowSize); + int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize); if (ret != 0) { return -1; } @@ -1528,7 +1441,7 @@ static int32_t getGroupbyColumnIndex(SSqlGroupbyExpr *pGroupbyExpr, SSDataBlock* static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; // in case of timestamp column, always generated results. 
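/*
 * Illustrative sketch, not part of this patch: the session-window aggregation
 * above (doSessionWindowAggImpl) merges consecutive rows into one window while
 * the gap between adjacent timestamps stays within pQueryAttr->sw.gap. The
 * grouping rule is essentially (hypothetical helper name):
 *
 *   bool startsNewSession(const TSKEY* tsList, int32_t i, int64_t gap) {
 *     return (i > 0) && (tsList[i] - tsList[i - 1] > gap);
 *   }
 */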
if (functionId == TSDB_FUNC_TS) { @@ -1540,12 +1453,12 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx } if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) { - return QUERY_IS_ASC_QUERY(pQuery); + return QUERY_IS_ASC_QUERY(pQueryAttr); } // denote the order type if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) { - return pCtx->param[0].i64 == pQuery->order.order; + return pCtx->param[0].i64 == pQueryAttr->order.order; } // in the reverse table scan, only the following functions need to be executed @@ -1624,7 +1537,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput, int32_t** rowCellInfoOffset) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; SQLFunctionCtx * pFuncCtx = (SQLFunctionCtx *)calloc(numOfOutput, sizeof(SQLFunctionCtx)); if (pFuncCtx == NULL) { @@ -1638,10 +1551,10 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr } for (int32_t i = 0; i < numOfOutput; ++i) { - SSqlFuncMsg *pSqlFuncMsg = &pExpr[i].base; + SSqlExpr *pSqlExpr = &pExpr[i].base; SQLFunctionCtx* pCtx = &pFuncCtx[i]; - SColIndex *pIndex = &pSqlFuncMsg->colInfo; + SColIndex *pIndex = &pSqlExpr->colInfo; if (TSDB_COL_REQ_NULL(pIndex->flag)) { pCtx->requireNull = true; @@ -1650,33 +1563,33 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr pCtx->requireNull = false; } - pCtx->inputBytes = pSqlFuncMsg->colBytes; - pCtx->inputType = pSqlFuncMsg->colType; + pCtx->inputBytes = pSqlExpr->colBytes; + pCtx->inputType = pSqlExpr->colType; pCtx->ptsOutputBuf = NULL; - pCtx->outputBytes = pExpr[i].bytes; - pCtx->outputType = pExpr[i].type; + pCtx->outputBytes = pSqlExpr->resBytes; + pCtx->outputType = pSqlExpr->resType; - pCtx->order = pQuery->order.order; - pCtx->functionId = pSqlFuncMsg->functionId; - pCtx->stableQuery = pQuery->stableQuery; - pCtx->interBufBytes = pExpr[i].interBytes; + pCtx->order = pQueryAttr->order.order; + pCtx->functionId = pSqlExpr->functionId; + pCtx->stableQuery = pQueryAttr->stableQuery; + pCtx->interBufBytes = pSqlExpr->interBytes; pCtx->start.key = INT64_MIN; pCtx->end.key = INT64_MIN; - pCtx->numOfParams = pSqlFuncMsg->numOfParams; + pCtx->numOfParams = pSqlExpr->numOfParams; for (int32_t j = 0; j < pCtx->numOfParams; ++j) { - int16_t type = pSqlFuncMsg->arg[j].argType; - int16_t bytes = pSqlFuncMsg->arg[j].argBytes; - if (pSqlFuncMsg->functionId == TSDB_FUNC_STDDEV_DST) { + int16_t type = pSqlExpr->param[j].nType; + int16_t bytes = pSqlExpr->param[j].nLen; + if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST) { continue; } if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - tVariantCreateFromBinary(&pCtx->param[j], pSqlFuncMsg->arg[j].argValue.pz, bytes, type); + tVariantCreateFromBinary(&pCtx->param[j], pSqlExpr->param[j].pz, bytes, type); } else { - tVariantCreateFromBinary(&pCtx->param[j], (char *)&pSqlFuncMsg->arg[j].argValue.i64, bytes, type); + tVariantCreateFromBinary(&pCtx->param[j], (char *)&pSqlExpr->param[j].i64, bytes, type); } } @@ -1687,30 +1600,30 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr int32_t f = pExpr[0].base.functionId; assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); - pCtx->param[2].i64 = pQuery->order.order; + pCtx->param[2].i64 = pQueryAttr->order.order; pCtx->param[2].nType = 
TSDB_DATA_TYPE_BIGINT; pCtx->param[3].i64 = functionId; pCtx->param[3].nType = TSDB_DATA_TYPE_BIGINT; - pCtx->param[1].i64 = pQuery->order.orderColId; + pCtx->param[1].i64 = pQueryAttr->order.orderColId; } else if (functionId == TSDB_FUNC_INTERP) { - pCtx->param[2].i64 = (int8_t)pQuery->fillType; - if (pQuery->fillVal != NULL) { - if (isNull((const char *)&pQuery->fillVal[i], pCtx->inputType)) { + pCtx->param[2].i64 = (int8_t)pQueryAttr->fillType; + if (pQueryAttr->fillVal != NULL) { + if (isNull((const char *)&pQueryAttr->fillVal[i], pCtx->inputType)) { pCtx->param[1].nType = TSDB_DATA_TYPE_NULL; } else { // todo refactor, tVariantCreateFromBinary should handle the NULL value if (pCtx->inputType != TSDB_DATA_TYPE_BINARY && pCtx->inputType != TSDB_DATA_TYPE_NCHAR) { - tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQuery->fillVal[i], pCtx->inputBytes, pCtx->inputType); + tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQueryAttr->fillVal[i], pCtx->inputBytes, pCtx->inputType); } } } } else if (functionId == TSDB_FUNC_TS_COMP) { - pCtx->param[0].i64 = pQuery->vgId; //TODO this should be the parameter from client + pCtx->param[0].i64 = pQueryAttr->vgId; //TODO this should be the parameter from client pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT; } else if (functionId == TSDB_FUNC_TWA) { - pCtx->param[1].i64 = pQuery->window.skey; + pCtx->param[1].i64 = pQueryAttr->window.skey; pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT; - pCtx->param[2].i64 = pQuery->window.ekey; + pCtx->param[2].i64 = pQueryAttr->window.ekey; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; } else if (functionId == TSDB_FUNC_ARITHM) { pCtx->param[1].pz = (char*) &pRuntimeEnv->sasArray[i]; @@ -1719,7 +1632,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr for(int32_t i = 1; i < numOfOutput; ++i) { (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + - pExpr[i - 1].interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery)); + pExpr[i - 1].base.interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); } setCtxTagColumnInfo(pFuncCtx, numOfOutput); @@ -1745,115 +1658,159 @@ static void* destroySQLFunctionCtx(SQLFunctionCtx* pCtx, int32_t numOfOutput) { return NULL; } -static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables) { - qDebug("QInfo:%"PRIu64" setup runtime env", GET_QID(pRuntimeEnv)); - SQuery *pQuery = pRuntimeEnv->pQuery; +static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables, SArray* pOperator, void* merger) { + qDebug("QInfo:0x%"PRIx64" setup runtime env", GET_QID(pRuntimeEnv)); + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pRuntimeEnv->prevGroupId = INT32_MIN; - pRuntimeEnv->pQuery = pQuery; + pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pRuntimeEnv->keyBuf = malloc(pQuery->maxSrcColumnSize + sizeof(int64_t)); + pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t)); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); - pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + pQuery->srcRowSize); - pRuntimeEnv->tagVal = malloc(pQuery->tagLen); - pRuntimeEnv->currentOffset = pQuery->limit.offset; + pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); + 
pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); + pRuntimeEnv->currentOffset = pQueryAttr->limit.offset; // NOTE: pTableCheckInfo need to update the query time range and the lastKey info pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); - pRuntimeEnv->sasArray = calloc(pQuery->numOfOutput, sizeof(SArithmeticSupport)); + pRuntimeEnv->sasArray = calloc(pQueryAttr->numOfOutput, sizeof(SArithmeticSupport)); if (pRuntimeEnv->sasArray == NULL || pRuntimeEnv->pResultRowHashTable == NULL || pRuntimeEnv->keyBuf == NULL || pRuntimeEnv->prevRow == NULL || pRuntimeEnv->tagVal == NULL) { goto _clean; } - if (pQuery->numOfCols) { - char* start = POINTER_BYTES * pQuery->numOfCols + (char*) pRuntimeEnv->prevRow; + if (pQueryAttr->numOfCols) { + char* start = POINTER_BYTES * pQueryAttr->numOfCols + (char*) pRuntimeEnv->prevRow; pRuntimeEnv->prevRow[0] = start; - for(int32_t i = 1; i < pQuery->numOfCols; ++i) { - pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQuery->colList[i-1].bytes; + for(int32_t i = 1; i < pQueryAttr->numOfCols; ++i) { + pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQueryAttr->tableCols[i-1].bytes; } *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN; } - qDebug("QInfo:%"PRIu64" init runtime environment completed", GET_QID(pRuntimeEnv)); + qDebug("QInfo:0x%"PRIx64" init runtime environment completed", GET_QID(pRuntimeEnv)); // group by normal column, sliding window query, interval query are handled by interval query processor // interval (down sampling operation) - if (onlyQueryTags(pQuery)) { // do nothing for tags query - - } else if (QUERY_IS_INTERVAL_QUERY(pQuery)) { - if (pQuery->stableQuery) { - pRuntimeEnv->proot = createMultiTableTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, - pQuery->pExpr1, pQuery->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); - } else { - pRuntimeEnv->proot = - createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); + int32_t numOfOperator = (int32_t) taosArrayGetSize(pOperator); + for(int32_t i = 0; i < numOfOperator; ++i) { + int32_t* op = taosArrayGet(pOperator, i); - if (pQuery->pExpr2 != NULL) { + switch (*op) { + case OP_TagScan: { + pRuntimeEnv->proot = createTagScanOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + break; + } + case OP_MultiTableTimeInterval: { + pRuntimeEnv->proot = + createMultiTableTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + break; + } + case OP_TimeWindow: { + pRuntimeEnv->proot = + createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + break; + } + case OP_Groupby: { + pRuntimeEnv->proot = + createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + break; + } + case OP_SessionWindow: { pRuntimeEnv->proot = - createArithOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr2, pQuery->numOfExpr2); + createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, 
pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + break; + } + case OP_MultiTableAggregate: { + pRuntimeEnv->proot = + createMultiTableAggOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + break; + } + case OP_Aggregate: { + pRuntimeEnv->proot = + createAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot->upstream->operatorType != OP_DummyInput) { + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream->info, pRuntimeEnv->proot); + } + break; } - if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { - SOperatorInfo* pInfo = pRuntimeEnv->proot; - pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput); + case OP_Arithmetic: { // TODO refactor to remove arith operator. + SOperatorInfo* prev = pRuntimeEnv->proot; + if (i == 0) { + pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot != NULL && pRuntimeEnv->proot->operatorType != OP_DummyInput) { // TODO refactor + setTableScanFilterOperatorInfo(prev->info, pRuntimeEnv->proot); + } + } else { + prev = pRuntimeEnv->proot; + assert(pQueryAttr->pExpr2 != NULL); + pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); + } + break; } - } - } else if (pQuery->groupbyColumn) { - pRuntimeEnv->proot = - createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); + case OP_Limit: { + pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); + break; + } - if (pQuery->pExpr2 != NULL) { - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr2, pQuery->numOfExpr2); - } - } else if (pQuery->sw.gap > 0) { - pRuntimeEnv->proot = createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); + case OP_Filter: { // todo refactor + assert(pQueryAttr->havingNum > 0); + if (pQueryAttr->stableQuery) { + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, pQueryAttr->numOfExpr3); + } else { + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + } + break; + } - if (pQuery->pExpr2 != NULL) { - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr2, pQuery->numOfExpr2); - } - } else if (isFixedOutputQuery(pQuery)) { - if (pQuery->stableQuery && !isTsCompQuery(pQuery)) { - pRuntimeEnv->proot = - createMultiTableAggOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - } else { - pRuntimeEnv->proot = - createAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - } + case OP_Fill: { + SOperatorInfo* pInfo = pRuntimeEnv->proot; + pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput); + break; + } - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); 
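/*
 * Illustrative note, not part of this patch: the rewritten setupQueryRuntimeEnv()
 * no longer derives the operator tree from query flags (interval, groupby,
 * fixed-output, ...). Instead the caller passes an ordered list of OP_* codes,
 * and each case in this switch stacks one operator on top of the current
 * pRuntimeEnv->proot, so the list order defines the pipeline bottom-up. The
 * driving loop has roughly this shape (createOperatorByCode is a hypothetical
 * factory standing in for the switch):
 *
 *   for (int32_t i = 0; i < numOfOperator; ++i) {
 *     int32_t* op = taosArrayGet(pOperator, i);            // next operator code
 *     pRuntimeEnv->proot = createOperatorByCode(pRuntimeEnv, *op,
 *                                               pRuntimeEnv->proot);
 *   }
 */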
+ case OP_MultiwayMergeSort: { + bool groupMix = true; + if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { + groupMix = false; + } + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, + 4096, merger, groupMix); // TODO hack it + break; + } - if (pQuery->pExpr2 != NULL) { - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr2, pQuery->numOfExpr2); - } - } else { // diff/add/multiply/subtract/division - assert(pQuery->checkResultBuf == 1); - if (!onlyQueryTags(pQuery)) { - pRuntimeEnv->proot = - createArithOperatorInfo(pRuntimeEnv, pRuntimeEnv->pTableScanner, pQuery->pExpr1, pQuery->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->pTableScanner->info, pRuntimeEnv->proot); - } - } + case OP_GlobalAggregate: { + pRuntimeEnv->proot = createGlobalAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, merger); + break; + } - if (pQuery->havingNum > 0) { - pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput); - } + case OP_SLimit: { + pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, merger); + break; + } - if (pQuery->limit.offset > 0) { - pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); - } + case OP_Distinct: { + pRuntimeEnv->proot = createDistinctOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + break; + } - if (pQuery->limit.limit > 0) { - pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); + default: { + assert(0); + } + } } return TSDB_CODE_SUCCESS; @@ -1869,23 +1826,36 @@ _clean: } static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle); pRuntimeEnv->pQueryHandle = NULL; - SMemRef* pMemRef = &pQuery->memRef; + SMemRef* pMemRef = &pQueryAttr->memRef; assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL); } +static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQueryAttr *pQueryAttr) { + if (pQueryAttr->tsCompQuery && pRuntimeEnv->outputBuf && pRuntimeEnv->outputBuf->pDataBlock && taosArrayGetSize(pRuntimeEnv->outputBuf->pDataBlock) > 0) { + SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0); + if (pColInfoData) { + FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor + if (f) { + fclose(f); + *(FILE **)pColInfoData->pData = NULL; + } + } + } +} + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo; - qDebug("QInfo:%"PRIu64" teardown runtime env", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" teardown runtime env", pQInfo->qId); if (pRuntimeEnv->sasArray != NULL) { - for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { + for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { tfree(pRuntimeEnv->sasArray[i].data); tfree(pRuntimeEnv->sasArray[i].colList); } @@ -1896,6 +1866,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { destroyResultBuf(pRuntimeEnv->pResultBuf); doFreeQueryHandle(pRuntimeEnv); + destroyTsComp(pRuntimeEnv, pQueryAttr); + pRuntimeEnv->pTsBuf = 
tsBufDestroy(pRuntimeEnv->pTsBuf); tfree(pRuntimeEnv->keyBuf); @@ -1913,7 +1885,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); pRuntimeEnv->prevResult = NULL; - } static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) { @@ -1941,53 +1912,47 @@ bool isQueryKilled(SQInfo *pQInfo) { void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;} -static bool isFixedOutputQuery(SQuery* pQuery) { - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { - return false; - } - - // Note:top/bottom query is fixed output query - if (pQuery->topBotQuery || pQuery->groupbyColumn || isTsCompQuery(pQuery)) { - return true; - } - - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; - - // ignore the ts_comp function - if (i == 0 && pExprMsg->functionId == TSDB_FUNC_PRJ && pExprMsg->numOfParams == 1 && - pExprMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - continue; - } - - if (pExprMsg->functionId == TSDB_FUNC_TS || pExprMsg->functionId == TSDB_FUNC_TS_DUMMY) { - continue; - } - - if (!IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].status)) { - return true; - } - } - - return false; -} +//static bool isFixedOutputQuery(SQueryAttr* pQueryAttr) { +// if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { +// return false; +// } +// +// // Note:top/bottom query is fixed output query +// if (pQueryAttr->topBotQuery || pQueryAttr->groupbyColumn || pQueryAttr->tsCompQuery) { +// return true; +// } +// +// for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { +// SSqlExpr *pExpr = &pQueryAttr->pExpr1[i].base; +// +// if (pExpr->functionId == TSDB_FUNC_TS || pExpr->functionId == TSDB_FUNC_TS_DUMMY) { +// continue; +// } +// +// if (!IS_MULTIOUTPUT(aAggs[pExpr->functionId].status)) { +// return true; +// } +// } +// +// return false; +//} // todo refactor with isLastRowQuery -bool isPointInterpoQuery(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; - if (functionId == TSDB_FUNC_INTERP) { - return true; - } - } - - return false; -} +//bool isPointInterpoQuery(SQueryAttr *pQueryAttr) { +// for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { +// int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; +// if (functionId == TSDB_FUNC_INTERP) { +// return true; +// } +// } +// +// return false; +//} // TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION -static UNUSED_FUNC bool isSumAvgRateQuery(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; +static UNUSED_FUNC bool isSumAvgRateQuery(SQueryAttr *pQueryAttr) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS) { continue; } @@ -2001,9 +1966,9 @@ static UNUSED_FUNC bool isSumAvgRateQuery(SQuery *pQuery) { return false; } -static bool isFirstLastRowQuery(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionID = pQuery->pExpr1[i].base.functionId; +static bool isFirstLastRowQuery(SQueryAttr *pQueryAttr) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functionID = pQueryAttr->pExpr1[i].base.functionId; if (functionID == TSDB_FUNC_LAST_ROW) { return true; } @@ -2012,36 +1977,13 @@ static bool isFirstLastRowQuery(SQuery *pQuery) { return false; } 
-static bool needReverseScan(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; - if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { - continue; - } - - if ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery)) { - return true; - } - - if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) { - // the scan order to acquire the last result of the specified column - int32_t order = (int32_t)pQuery->pExpr1[i].base.arg->argValue.i64; - if (order != pQuery->order.order) { - return true; - } - } - } - - return false; -} - /** * The following 4 kinds of query are treated as the tags query * tagprj, tid_tag query, count(tbname), 'abc' (user defined constant value column) query */ -bool onlyQueryTags(SQuery* pQuery) { - for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SExprInfo* pExprInfo = &pQuery->pExpr1[i]; +bool onlyQueryTags(SQueryAttr* pQueryAttr) { + for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + SExprInfo* pExprInfo = &pQueryAttr->pExpr1[i]; int32_t functionId = pExprInfo->base.functionId; @@ -2058,55 +2000,32 @@ bool onlyQueryTags(SQuery* pQuery) { ///////////////////////////////////////////////////////////////////////////////////////////// -void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) { - assert(key >= keyFirst && key <= keyLast && pQuery->interval.sliding <= pQuery->interval.interval); - win->skey = taosTimeTruncate(key, &pQuery->interval, pQuery->precision); +void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) { + assert(key >= keyFirst && key <= keyLast && pQueryAttr->interval.sliding <= pQueryAttr->interval.interval); + win->skey = taosTimeTruncate(key, &pQueryAttr->interval, pQueryAttr->precision); /* - * if the realSkey > INT64_MAX - pQuery->interval.interval, the query duration between + * if the realSkey > INT64_MAX - pQueryAttr->interval.interval, the query duration between * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. 
*/ - if (keyFirst > (INT64_MAX - pQuery->interval.interval)) { - assert(keyLast - keyFirst < pQuery->interval.interval); + if (keyFirst > (INT64_MAX - pQueryAttr->interval.interval)) { + assert(keyLast - keyFirst < pQueryAttr->interval.interval); win->ekey = INT64_MAX; - } else if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - win->ekey = taosTimeAdd(win->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; - } else { - win->ekey = win->skey + pQuery->interval.interval - 1; - } -} - -static void setScanLimitationByResultBuffer(SQuery *pQuery) { - if (isTopBottomQuery(pQuery)) { - pQuery->checkResultBuf = 0; - } else if (isGroupbyColumn(pQuery->pGroupbyExpr)) { - pQuery->checkResultBuf = 0; + } else if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { + win->ekey = taosTimeAdd(win->skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; } else { - bool hasMultioutput = false; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; - if (pExprMsg->functionId == TSDB_FUNC_TS || pExprMsg->functionId == TSDB_FUNC_TS_DUMMY) { - continue; - } - - hasMultioutput = IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].status); - if (!hasMultioutput) { - break; - } - } - - pQuery->checkResultBuf = hasMultioutput ? 1 : 0; + win->ekey = win->skey + pQueryAttr->interval.interval - 1; } } /* * todo add more parameters to check soon.. */ -bool colIdCheck(SQuery *pQuery, uint64_t qId) { +bool colIdCheck(SQueryAttr *pQueryAttr, uint64_t qId) { // load data column information is incorrect - for (int32_t i = 0; i < pQuery->numOfCols - 1; ++i) { - if (pQuery->colList[i].colId == pQuery->colList[i + 1].colId) { - qError("QInfo:%"PRIu64" invalid data load column for query", qId); + for (int32_t i = 0; i < pQueryAttr->numOfCols - 1; ++i) { + if (pQueryAttr->tableCols[i].colId == pQueryAttr->tableCols[i + 1].colId) { + qError("QInfo:0x%"PRIx64" invalid data load column for query", qId); return false; } } @@ -2116,9 +2035,9 @@ bool colIdCheck(SQuery *pQuery, uint64_t qId) { // todo ignore the avg/sum/min/max/count/stddev/top/bottom functions, of which // the scan order is not matter -static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; +static bool onlyOneQueryType(SQueryAttr *pQueryAttr, int32_t functId, int32_t functIdDst) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAG_DUMMY) { @@ -2133,15 +2052,49 @@ static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst return true; } -static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_FIRST, TSDB_FUNC_FIRST_DST); } +static bool onlyFirstQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_FIRST, TSDB_FUNC_FIRST_DST); } + +static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } + +static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { + bool hasFirstLastFunc = false; + bool hasOtherFunc = false; + + if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) { + return 
status; + } + + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + int32_t functionId = pQuery->pExpr1[i].base.functionId; + + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || + functionId == TSDB_FUNC_TAG_DUMMY) { + continue; + } + + if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { + hasFirstLastFunc = true; + } else { + hasOtherFunc = true; + } + } + + if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) { + if(!hasOtherFunc) { + return BLK_DATA_DISCARD; + } else{ + return BLK_DATA_ALL_NEEDED; + } + } -static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } + return status; +} static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { - SQuery* pQuery = &pQInfo->query; - size_t t = taosArrayGetSize(pQuery->tableGroupInfo.pGroupList); + SQueryAttr* pQueryAttr = &pQInfo->query; + size_t t = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList); for(int32_t i = 0; i < t; ++i) { - SArray* p1 = taosArrayGetP(pQuery->tableGroupInfo.pGroupList, i); + SArray* p1 = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); size_t len = taosArrayGetSize(p1); for(int32_t j = 0; j < len; ++j) { @@ -2156,100 +2109,100 @@ static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { } static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) { - SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + SQueryAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; // in case of point-interpolation query, use asc order scan - char msg[] = "QInfo:%"PRIu64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 + char msg[] = "QInfo:0x%"PRIx64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 "-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64; // todo handle the case the the order irrelevant query type mixed up with order critical query type // descending order query for last_row query - if (isFirstLastRowQuery(pQuery)) { - qDebug("QInfo:%"PRIu64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId, pQuery->order.order, TSDB_ORDER_ASC); + if (isFirstLastRowQuery(pQueryAttr)) { + qDebug("QInfo:0x%"PRIx64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId, pQueryAttr->order.order, TSDB_ORDER_ASC); - pQuery->order.order = TSDB_ORDER_ASC; - if (pQuery->window.skey > pQuery->window.ekey) { - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); + pQueryAttr->order.order = TSDB_ORDER_ASC; + if (pQueryAttr->window.skey > pQueryAttr->window.ekey) { + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } return; } - if (isGroupbyColumn(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) { - pQuery->order.order = TSDB_ORDER_ASC; - if (pQuery->window.skey > pQuery->window.ekey) { - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); + if (pQueryAttr->groupbyColumn && pQueryAttr->order.order == TSDB_ORDER_DESC) { + pQueryAttr->order.order = TSDB_ORDER_ASC; + if (pQueryAttr->window.skey > pQueryAttr->window.ekey) { + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } - doExchangeTimeWindow(pQInfo, &pQuery->window); + doExchangeTimeWindow(pQInfo, &pQueryAttr->window); return; } - if (isPointInterpoQuery(pQuery) && pQuery->interval.interval == 0) { - if (!QUERY_IS_ASC_QUERY(pQuery)) { - qDebug(msg, pQInfo->qId, "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey, pQuery->window.ekey, 
pQuery->window.ekey, pQuery->window.skey); - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); + if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) { + if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { + qDebug(msg, pQInfo, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } - pQuery->order.order = TSDB_ORDER_ASC; + pQueryAttr->order.order = TSDB_ORDER_ASC; return; } - if (pQuery->interval.interval == 0) { - if (onlyFirstQuery(pQuery)) { - if (!QUERY_IS_ASC_QUERY(pQuery)) { - qDebug(msg, pQInfo->qId, "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey, - pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey); + if (pQueryAttr->interval.interval == 0) { + if (onlyFirstQuery(pQueryAttr)) { + if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { + qDebug(msg, pQInfo, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, + pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQuery->window); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); + doExchangeTimeWindow(pQInfo, &pQueryAttr->window); } - pQuery->order.order = TSDB_ORDER_ASC; - } else if (onlyLastQuery(pQuery)) { - if (QUERY_IS_ASC_QUERY(pQuery)) { - qDebug(msg, pQInfo->qId, "only-last", pQuery->order.order, TSDB_ORDER_DESC, pQuery->window.skey, - pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey); + pQueryAttr->order.order = TSDB_ORDER_ASC; + } else if (onlyLastQuery(pQueryAttr)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, + pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQuery->window); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); + doExchangeTimeWindow(pQInfo, &pQueryAttr->window); } - pQuery->order.order = TSDB_ORDER_DESC; + pQueryAttr->order.order = TSDB_ORDER_DESC; } } else { // interval query if (stableQuery) { - if (onlyFirstQuery(pQuery)) { - if (!QUERY_IS_ASC_QUERY(pQuery)) { - qDebug(msg, pQInfo->qId, "only-first stable", pQuery->order.order, TSDB_ORDER_ASC, - pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey); + if (onlyFirstQuery(pQueryAttr)) { + if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { + qDebug(msg, pQInfo, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC, + pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQuery->window); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); + doExchangeTimeWindow(pQInfo, &pQueryAttr->window); } - pQuery->order.order = TSDB_ORDER_ASC; - } else if (onlyLastQuery(pQuery)) { - if (QUERY_IS_ASC_QUERY(pQuery)) { - qDebug(msg, pQInfo->qId, "only-last stable", pQuery->order.order, TSDB_ORDER_DESC, - pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey); + pQueryAttr->order.order = TSDB_ORDER_ASC; + } else if (onlyLastQuery(pQueryAttr)) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + qDebug(msg, pQInfo, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC, + pQueryAttr->window.skey, 
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQuery->window); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); + doExchangeTimeWindow(pQInfo, &pQueryAttr->window); } - pQuery->order.order = TSDB_ORDER_DESC; + pQueryAttr->order.order = TSDB_ORDER_DESC; } } } } static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, int32_t* rowsize) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t MIN_ROWS_PER_PAGE = 4; - *rowsize = (int32_t)(pQuery->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery)); + *rowsize = (int32_t)(pQueryAttr->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); int32_t overhead = sizeof(tFilePage); // one page contains at least two rows @@ -2265,17 +2218,17 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i #define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR) static bool doFilterByBlockStatistics(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx, int32_t numOfRows) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - if (pDataStatis == NULL || pQuery->numOfFilterCols == 0) { + if (pDataStatis == NULL || pQueryAttr->numOfFilterCols == 0) { return true; } - for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { - SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; + for (int32_t k = 0; k < pQueryAttr->numOfFilterCols; ++k) { + SSingleColumnFilterInfo *pFilterInfo = &pQueryAttr->pFilterInfo[k]; int32_t index = -1; - for(int32_t i = 0; i < pQuery->numOfCols; ++i) { + for(int32_t i = 0; i < pQueryAttr->numOfCols; ++i) { if (pDataStatis[i].colId == pFilterInfo->info.colId) { index = i; break; @@ -2329,14 +2282,14 @@ static bool doFilterByBlockStatistics(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis return false; } -static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { +static bool overlapWithTimeWindow(SQueryAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) { STimeWindow w = {0}; - TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); - TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); + TSKEY sk = MIN(pQueryAttr->window.skey, pQueryAttr->window.ekey); + TSKEY ek = MAX(pQueryAttr->window.skey, pQueryAttr->window.ekey); - if (QUERY_IS_ASC_QUERY(pQuery)) { - getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, sk, ek, &w); + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + getAlignQueryTimeWindow(pQueryAttr, pBlockInfo->window.skey, sk, ek, &w); assert(w.ekey >= pBlockInfo->window.skey); if (w.ekey < pBlockInfo->window.ekey) { @@ -2344,7 +2297,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } while(1) { - getNextTimeWindow(pQuery, &w); + getNextTimeWindow(pQueryAttr, &w); if (w.skey > pBlockInfo->window.ekey) { break; } @@ -2355,7 +2308,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } } } else { - getAlignQueryTimeWindow(pQuery, pBlockInfo->window.ekey, sk, ek, &w); + getAlignQueryTimeWindow(pQueryAttr, pBlockInfo->window.ekey, sk, ek, &w); assert(w.skey <= pBlockInfo->window.ekey); if (w.skey > pBlockInfo->window.skey) { @@ -2363,7 +2316,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } 
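The changeExecuteScanOrder() hunks above repeat one pattern for the interp, only-first and only-last cases: flip the scan direction and exchange the window's endpoints so the requested range stays consistent with the new order. A compact sketch of that pattern, using a hypothetical TimeWindow type rather than SQueryAttr (only the force-ascending direction is shown; only-last is the mirror image with a descending order, and the per-table exchange done by doExchangeTimeWindow is omitted):

#include <stdint.h>
#include <stdio.h>

typedef int64_t TKEY;
typedef struct { TKEY skey, ekey; } TimeWindow;   /* stand-in for the engine's STimeWindow */
enum { ORDER_ASC = 1, ORDER_DESC = 2 };

/* Force an ascending scan; if the window was supplied in descending form,
 * swap its endpoints as well, much as the hunk above does with SWAP(). */
static void forceAscendingScan(TimeWindow *win, int *order) {
  if (*order != ORDER_ASC) {
    if (win->skey > win->ekey) {
      TKEY tmp = win->skey;
      win->skey = win->ekey;
      win->ekey = tmp;
    }
    *order = ORDER_ASC;
  }
}

int main(void) {
  TimeWindow w = { .skey = 2000, .ekey = 1000 };
  int order = ORDER_DESC;
  forceAscendingScan(&w, &order);
  printf("order=%d, range=%lld-%lld\n", order, (long long)w.skey, (long long)w.ekey);
  return 0;
}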
while(1) { - getNextTimeWindow(pQuery, &w); + getNextTimeWindow(pQueryAttr, &w); if (w.ekey < pBlockInfo->window.skey) { break; } @@ -2383,7 +2336,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key, bool asc #if defined(_DEBUG_VIEW) printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%"PRIu64", query order:%d, ts order:%d, traverse:%d, index:%d\n", - elem.ts, key, elem.tag.i64, pQuery->order.order, pRuntimeEnv->pTsBuf->tsOrder, + elem.ts, key, elem.tag.i64, pQueryAttr->order.order, pRuntimeEnv->pTsBuf->tsOrder, pRuntimeEnv->pTsBuf->cur.order, pRuntimeEnv->pTsBuf->cur.tsIndex); #endif @@ -2405,7 +2358,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key, bool asc } void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, - SSDataBlock* pBlock, bool ascQuery) { + SSDataBlock* pBlock, bool ascQuery) { int32_t numOfRows = pBlock->info.rows; int8_t *p = calloc(numOfRows, sizeof(int8_t)); @@ -2434,7 +2387,7 @@ void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInf } // save the cursor status - pRuntimeEnv->pQuery->current->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); + pRuntimeEnv->current->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); } else { for (int32_t i = 0; i < numOfRows; ++i) { bool qualified = false; @@ -2521,11 +2474,11 @@ void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInf if (start > 0) { SColumnInfoData* pColumnInfoData = taosArrayGet(pBlock->pDataBlock, 0); - assert(pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && - pColumnInfoData->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX); - - pBlock->info.window.skey = *(int64_t*)pColumnInfoData->pData; - pBlock->info.window.ekey = *(int64_t*)(pColumnInfoData->pData + TSDB_KEYSIZE * (start - 1)); + if (pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && + pColumnInfoData->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + pBlock->info.window.skey = *(int64_t*)pColumnInfoData->pData; + pBlock->info.window.ekey = *(int64_t*)(pColumnInfoData->pData + TSDB_KEYSIZE * (start - 1)); + } } } @@ -2554,18 +2507,18 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData return status; } -static void doSetFilterColumnInfo(SQuery* pQuery, SSDataBlock* pBlock) { - if (pQuery->numOfFilterCols > 0 && pQuery->pFilterInfo[0].pData != NULL) { +static void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock) { + if (numOfFilterCols > 0 && pFilterInfo[0].pData != NULL) { return; } // set the initial static data value filter expression - for (int32_t i = 0; i < pQuery->numOfFilterCols; ++i) { + for (int32_t i = 0; i < numOfFilterCols; ++i) { for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, j); - if (pQuery->pFilterInfo[i].info.colId == pColInfo->info.colId) { - pQuery->pFilterInfo[i].pData = pColInfo->pData; + if (pFilterInfo[i].info.colId == pColInfo->info.colId) { + pFilterInfo[i].pData = pColInfo->pData; break; } } @@ -2578,9 +2531,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pBlock->pDataBlock = NULL; pBlock->pBlockStatis = NULL; - SQuery* pQuery = pRuntimeEnv->pQuery; - int64_t groupId = pQuery->current->groupIndex; - bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int64_t groupId = pRuntimeEnv->current->groupIndex; + bool ascQuery = 
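doSetFilterColumnInfo() above now takes the filter array and its length explicitly instead of reaching into SQuery; what it does is bind each column filter to the data pointer of the block column carrying the same colId. A self-contained version of that matching loop, with illustrative ColumnData/FilterInfo types standing in for the engine's structs:

#include <stddef.h>

typedef struct { int colId; const void *pData; } ColumnData;   /* one column of a data block */
typedef struct { int colId; const void *pData; } FilterInfo;   /* one single-column filter */

/* Point every filter at the block column with the same colId; once the first
 * filter has a data pointer, the whole set is assumed to be bound already. */
static void bindFilterColumns(FilterInfo *filters, int numOfFilters,
                              const ColumnData *cols, int numOfCols) {
  if (numOfFilters > 0 && filters[0].pData != NULL) {
    return;
  }
  for (int i = 0; i < numOfFilters; ++i) {
    for (int j = 0; j < numOfCols; ++j) {
      if (filters[i].colId == cols[j].colId) {
        filters[i].pData = cols[j].pData;
        break;
      }
    }
  }
}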
QUERY_IS_ASC_QUERY(pQueryAttr); SQInfo* pQInfo = pRuntimeEnv->qinfo; SQueryCostInfo* pCost = &pQInfo->summary; @@ -2588,15 +2541,15 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa if (pRuntimeEnv->pTsBuf != NULL) { (*status) = BLK_DATA_ALL_NEEDED; - if (pQuery->stableQuery) { // todo refactor + if (pQueryAttr->stableQuery) { // todo refactor SExprInfo* pExprInfo = &pTableScanInfo->pExpr[0]; - int16_t tagId = (int16_t)pExprInfo->base.arg->argValue.i64; - SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagId); + int16_t tagId = (int16_t)pExprInfo->base.param[0].i64; + SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagId); // compare tag first tVariant t = {0}; - doSetTagValueInParam(pQuery->current->pTable, tagId, &t, pColInfo->type, pColInfo->bytes); - setTimestampListJoinInfo(pRuntimeEnv, &t, pQuery->current); + doSetTagValueInParam(pRuntimeEnv->current->pTable, tagId, &t, pColInfo->type, pColInfo->bytes); + setTimestampListJoinInfo(pRuntimeEnv, &t, pRuntimeEnv->current); STSElem elem = tsBufGetElem(pRuntimeEnv->pTsBuf); if (!tsBufIsValidElem(&elem) || (tsBufIsValidElem(&elem) && (tVariantCompare(&t, elem.tag) != 0))) { @@ -2608,8 +2561,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // Calculate all time windows that are overlapping or contain current data block. // If current data block is contained by all possible time window, do not load current data block. - if (pQuery->numOfFilterCols > 0 || pQuery->groupbyColumn || pQuery->sw.gap > 0 || - (QUERY_IS_INTERVAL_QUERY(pQuery) && overlapWithTimeWindow(pQuery, &pBlock->info))) { + if (pQueryAttr->numOfFilterCols > 0 || pQueryAttr->groupbyColumn || pQueryAttr->sw.gap > 0 || + (QUERY_IS_INTERVAL_QUERY(pQueryAttr) && overlapWithTimeWindow(pQueryAttr, &pBlock->info))) { (*status) = BLK_DATA_ALL_NEEDED; } @@ -2617,30 +2570,32 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa if ((*status) != BLK_DATA_ALL_NEEDED) { // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { SResultRow* pResult = NULL; bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); TSKEY k = ascQuery? 
pBlock->info.window.skey : pBlock->info.window.ekey; - STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQuery); + STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - } else if (pQuery->stableQuery && (!isTsCompQuery(pQuery))) { // stable aggregate, not interval aggregate or normal column aggregate + } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery)) { // stable aggregate, not interval aggregate or normal column aggregate doSetTableGroupOutputBuf(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx, pTableScanInfo->rowCellInfoOffset, pTableScanInfo->numOfOutput, - pQuery->current->groupIndex); + pRuntimeEnv->current->groupIndex); } (*status) = doFilterByBlockTimeWindow(pTableScanInfo, pBlock); } SDataBlockInfo* pBlockInfo = &pBlock->info; - if ((*status) == BLK_DATA_NO_NEEDED) { - qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, + *status = updateBlockLoadStatus(pRuntimeEnv->pQueryAttr, *status); + + if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) { + qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pCost->discardBlocks += 1; } else if ((*status) == BLK_DATA_STATIS_NEEDED) { @@ -2659,15 +2614,15 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pCost->loadBlockStatis += 1; tsdbRetrieveDataBlockStatisInfo(pTableScanInfo->pQueryHandle, &pBlock->pBlockStatis); - if (pQuery->topBotQuery && pBlock->pBlockStatis != NULL) { + if (pQueryAttr->topBotQuery && pBlock->pBlockStatis != NULL) { { // set previous window - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { SResultRow* pResult = NULL; bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); TSKEY k = ascQuery? 
pBlock->info.window.skey : pBlock->info.window.ekey; - STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQuery); + STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { @@ -2676,14 +2631,14 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa } } bool load = false; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { int32_t functionId = pTableScanInfo->pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { load = topbot_datablock_filter(&pTableScanInfo->pCtx[i], (char*)&(pBlock->pBlockStatis[i].min), (char*)&(pBlock->pBlockStatis[i].max)); if (!load) { // current block has been discard due to filter applied pCost->discardBlocks += 1; - qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, + qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); (*status) = BLK_DATA_DISCARD; return TSDB_CODE_SUCCESS; @@ -2695,7 +2650,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // current block has been discard due to filter applied if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) { pCost->discardBlocks += 1; - qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, + qDebug("QInfo:0x%"PRIx64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); (*status) = BLK_DATA_DISCARD; return TSDB_CODE_SUCCESS; @@ -2708,9 +2663,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa return terrno; } - doSetFilterColumnInfo(pQuery, pBlock); - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL) { - filterRowsInDataBlock(pRuntimeEnv, pQuery->pFilterInfo, pQuery->numOfFilterCols, pBlock, ascQuery); + doSetFilterColumnInfo(pQueryAttr->pFilterInfo, pQueryAttr->numOfFilterCols, pBlock); + if (pQueryAttr->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL) { + filterRowsInDataBlock(pRuntimeEnv, pQueryAttr->pFilterInfo, pQueryAttr->numOfFilterCols, pBlock, ascQuery); } } @@ -2825,22 +2780,22 @@ static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t num void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCtx, int32_t numOfOutput) { SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; - SExprInfo *pExpr = pOperatorInfo->pExpr; - SQuery *pQuery = pRuntimeEnv->pQuery; + SExprInfo *pExpr = pOperatorInfo->pExpr; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SExprInfo* pExprInfo = &pExpr[0]; - if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP && pQuery->stableQuery) { + if (pQueryAttr->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP && pQueryAttr->stableQuery) { assert(pExprInfo->base.numOfParams == 1); - int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; - SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); + int16_t 
tagColId = (int16_t)pExprInfo->base.param[0].i64; + SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagColId); doSetTagValueInParam(pTable, tagColId, &pCtx[0].tag, pColInfo->type, pColInfo->bytes); return; } else { // set tag value, by which the results are aggregated. int32_t offset = 0; - memset(pRuntimeEnv->tagVal, 0, pQuery->tagLen); + memset(pRuntimeEnv->tagVal, 0, pQueryAttr->tagLen); for (int32_t idx = 0; idx < numOfOutput; ++idx) { SExprInfo* pLocalExprInfo = &pExpr[idx]; @@ -2851,20 +2806,20 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt } // todo use tag column index to optimize performance - doSetTagValueInParam(pTable, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->type, - pLocalExprInfo->bytes); + doSetTagValueInParam(pTable, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType, + pLocalExprInfo->base.resBytes); - if (IS_NUMERIC_TYPE(pLocalExprInfo->type) || pLocalExprInfo->type == TSDB_DATA_TYPE_BOOL) { - memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->bytes); + if (IS_NUMERIC_TYPE(pLocalExprInfo->base.resType) || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_BOOL) { + memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes); } else { memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen); } - offset += pLocalExprInfo->bytes; + offset += pLocalExprInfo->base.resBytes; } //todo : use index to avoid iterator all possible output columns - if (pQuery->stableQuery && pQuery->stabledev && (pRuntimeEnv->prevResult != NULL)) { + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { setParamForStableStddev(pRuntimeEnv, pCtx, numOfOutput, pExprInfo); } } @@ -2945,32 +2900,32 @@ static UNUSED_FUNC void printBinaryData(int32_t functionId, char *data, int32_t } void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfRows) { - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t numOfCols = pQuery->numOfOutput; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t numOfCols = pQueryAttr->numOfOutput; printf("super table query intermediate result, total:%d\n", numOfRows); for (int32_t j = 0; j < numOfRows; ++j) { for (int32_t i = 0; i < numOfCols; ++i) { - switch (pQuery->pExpr1[i].type) { + switch (pQueryAttr->pExpr1[i].base.resType) { case TSDB_DATA_TYPE_BINARY: { - int32_t type = pQuery->pExpr1[i].type; - printBinaryData(pQuery->pExpr1[i].base.functionId, pdata[i]->data + pQuery->pExpr1[i].bytes * j, + int32_t type = pQueryAttr->pExpr1[i].base.resType; + printBinaryData(pQueryAttr->pExpr1[i].base.functionId, pdata[i]->data + pQueryAttr->pExpr1[i].base.resBytes * j, type); break; } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQueryAttr->pExpr1[i].base.resBytes * j)); break; case TSDB_DATA_TYPE_INT: - printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); + printf("%d\t", *(int32_t *)(pdata[i]->data + pQueryAttr->pExpr1[i].base.resBytes * j)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%f\t", *(float *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); + printf("%f\t", *(float *)(pdata[i]->data + pQueryAttr->pExpr1[i].base.resBytes * j)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%lf\t", *(double *)(pdata[i]->data + 
pQuery->pExpr1[i].bytes * j)); + printf("%lf\t", *(double *)(pdata[i]->data + pQueryAttr->pExpr1[i].base.resBytes * j)); break; } } @@ -3010,7 +2965,7 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl } -static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) { +static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQueryInfo *pTableQueryInfo) { if (pTableQueryInfo == NULL) { return; } @@ -3026,17 +2981,17 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * } static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pRuntimeEnv)); for(int32_t i = 0; i < numOfGroups; ++i) { SArray *group = GET_TABLEGROUP(pRuntimeEnv, i); - SArray *tableKeyGroup = taosArrayGetP(pQuery->tableGroupInfo.pGroupList, i); + SArray *tableKeyGroup = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); size_t t = taosArrayGetSize(group); for (int32_t j = 0; j < t; ++j) { STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); - updateTableQueryInfoForReverseScan(pQuery, pCheckInfo); + updateTableQueryInfoForReverseScan(pQueryAttr, pCheckInfo); // update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide // the start check timestamp of tsdbQueryHandle @@ -3069,7 +3024,7 @@ int32_t initResultRow(SResultRow *pResultRow) { * +------------+-------------------------------------------+-------------------------------------------+ * offset[0] offset[1] */ -void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid) { +void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid, int32_t stage) { SQLFunctionCtx* pCtx = pInfo->pCtx; SSDataBlock* pDataBlock = pInfo->pRes; int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset; @@ -3088,8 +3043,9 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i SResultRowCellInfo* pCellInfo = getResultCell(pRow, i, rowCellInfoOffset); RESET_RESULT_INFO(pCellInfo); - pCtx[i].resultInfo = pCellInfo; - pCtx[i].pOutput = pData->pData; + pCtx[i].resultInfo = pCellInfo; + pCtx[i].pOutput = pData->pData; + pCtx[i].currentStage = stage; assert(pCtx[i].pOutput != NULL); // set the timestamp output buffer for top/bottom/diff query @@ -3102,21 +3058,21 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i initCtxOutputBuffer(pCtx, pDataBlock->info.numOfCols); } -void updateOutputBuf(SArithOperatorInfo* pInfo, int32_t numOfInputRows) { - SOptrBasicInfo* pBInfo = &pInfo->binfo; +void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows) { SSDataBlock* pDataBlock = pBInfo->pRes; - int32_t newSize = pDataBlock->info.rows + numOfInputRows; - if (pInfo->bufCapacity < newSize) { + int32_t newSize = pDataBlock->info.rows + numOfInputRows + 5; // extra output buffer + if ((*bufCapacity) < newSize) { for(int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); + char* p = realloc(pColInfo->pData, newSize * pColInfo->info.bytes); if (p != NULL) { pColInfo->pData = p; // it starts from the tail of the previously generated results. 
pBInfo->pCtx[i].pOutput = pColInfo->pData; - pInfo->bufCapacity = newSize; + (*bufCapacity) = newSize; } else { // longjmp } @@ -3157,7 +3113,7 @@ void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status) { } static void setupEnvForReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, SQLFunctionCtx* pCtx, int32_t numOfOutput) { - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; if (pRuntimeEnv->pTsBuf) { SWITCH_ORDER(pRuntimeEnv->pTsBuf->cur.order); @@ -3166,25 +3122,25 @@ static void setupEnvForReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo } // reverse order time range - SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); + SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); SET_REVERSE_SCAN_FLAG(pRuntimeEnv); setQueryStatus(pRuntimeEnv, QUERY_NOT_COMPLETED); switchCtxOrder(pCtx, numOfOutput); - SWITCH_ORDER(pQuery->order.order); + SWITCH_ORDER(pQueryAttr->order.order); setupQueryRangeForReverseScan(pRuntimeEnv); } void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset) { SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv; - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t numOfOutput = pOperator->numOfOutput; - if (pQuery->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery) || pQuery->sw.gap > 0) { + if (pQueryAttr->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQueryAttr) || pQueryAttr->sw.gap > 0) { // for each group result, call the finalize function for each column - if (pQuery->groupbyColumn) { + if (pQueryAttr->groupbyColumn) { closeAllResultRows(pResultRowInfo); } @@ -3214,9 +3170,9 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult } } -static bool hasMainOutput(SQuery *pQuery) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; +static bool hasMainOutput(SQueryAttr *pQueryAttr) { + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; if (functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TAGPRJ) { return true; @@ -3226,7 +3182,7 @@ static bool hasMainOutput(SQuery *pQuery) { return false; } -static STableQueryInfo *createTableQueryInfo(SQuery* pQuery, void* pTable, bool groupbyColumn, STimeWindow win, void* buf) { +STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf) { STableQueryInfo *pTableQueryInfo = buf; pTableQueryInfo->win = win; @@ -3236,7 +3192,7 @@ static STableQueryInfo *createTableQueryInfo(SQuery* pQuery, void* pTable, bool pTableQueryInfo->cur.vgroupIndex = -1; // set more initial size of interval/groupby query - if (QUERY_IS_INTERVAL_QUERY(pQuery) || groupbyColumn) { + if (QUERY_IS_INTERVAL_QUERY(pQueryAttr) || groupbyColumn) { int32_t initialSize = 128; int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT); if (code != TSDB_CODE_SUCCESS) { @@ -3272,7 +3228,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe continue; } - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQueryAttr, bufPage, pResult->offset, offset); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3298,7 +3254,7 @@ 
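updateOutputBuf() above now takes the capacity by pointer and grows every column's buffer with realloc, reserving a few extra rows (the "+ 5") so repeated small appends do not force a reallocation each time; the failure path is still only marked with a "// longjmp" comment. A stripped-down sketch of that grow step with an illustrative Column type and an explicit error return in place of the longjmp:

#include <stdbool.h>
#include <stdlib.h>

typedef struct { char *data; int bytes; } Column;   /* hypothetical per-column output buffer */

/* Make sure every column can hold rows + incoming entries, plus a small
 * headroom. Returns false if any realloc fails; the engine would instead
 * longjmp to its error handler at that point. */
static bool ensureOutputCapacity(Column *cols, int numOfCols, int *capacity, int rows, int incoming) {
  int newSize = rows + incoming + 5;                 /* headroom, mirroring the hunk above */
  if (*capacity >= newSize) {
    return true;
  }
  for (int i = 0; i < numOfCols; ++i) {
    char *p = realloc(cols[i].data, (size_t)newSize * (size_t)cols[i].bytes);
    if (p == NULL) {
      return false;
    }
    cols[i].data = p;
  }
  *capacity = newSize;
  return true;
}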
void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe * all group belong to one result set, and each group result has different group id so set the id to be one */ if (pResultRow->pageId == -1) { - int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->pQuery->resultRowSize); + int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize); if (ret != TSDB_CODE_SUCCESS) { return; } @@ -3309,7 +3265,7 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t groupIndex, TSKEY nextKey) { - STableQueryInfo *pTableQueryInfo = pRuntimeEnv->pQuery->current; + STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; // lastKey needs to be updated pTableQueryInfo->lastKey = nextKey; @@ -3330,7 +3286,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF int16_t offset = 0; for (int32_t i = 0; i < numOfCols; ++i) { - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQueryAttr, page, pResult->offset, offset); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3347,32 +3303,32 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF } void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - SSqlFuncMsg* pFuncMsg = &pExprInfo->base; - if (pQuery->stableQuery && (pRuntimeEnv->pTsBuf != NULL) && - (pFuncMsg->functionId == TSDB_FUNC_TS || pFuncMsg->functionId == TSDB_FUNC_PRJ) && - (pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { - assert(pFuncMsg->numOfParams == 1); + SSqlExpr* pExpr = &pExprInfo->base; + if (pQueryAttr->stableQuery && (pRuntimeEnv->pTsBuf != NULL) && + (pExpr->functionId == TSDB_FUNC_TS || pExpr->functionId == TSDB_FUNC_PRJ) && + (pExpr->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { + assert(pExpr->numOfParams == 1); - int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; - SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); + int16_t tagColId = (int16_t)pExprInfo->base.param[0].i64; + SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagColId); doSetTagValueInParam(pTable, tagColId, &pCtx->tag, pColInfo->type, pColInfo->bytes); int16_t tagType = pCtx[0].tag.nType; if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%"PRIu64" set tag value for join comparison, colId:%" PRId64 ", val:%s", GET_QID(pRuntimeEnv), - pExprInfo->base.arg->argValue.i64, pCtx[0].tag.pz); + qDebug("QInfo:0x%"PRIx64" set tag value for join comparison, colId:%" PRId64 ", val:%s", GET_QID(pRuntimeEnv), + pExprInfo->base.param[0].i64, pCtx[0].tag.pz); } else { - qDebug("QInfo:%"PRIu64" set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, GET_QID(pRuntimeEnv), - pExprInfo->base.arg->argValue.i64, pCtx[0].tag.i64); + qDebug("QInfo:0x%"PRIx64" set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, GET_QID(pRuntimeEnv), + pExprInfo->base.param[0].i64, pCtx[0].tag.i64); } } } int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, 
tVariant* pTag, STableQueryInfo *pTableQueryInfo) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; assert(pRuntimeEnv->pTsBuf != NULL); @@ -3380,14 +3336,14 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, if (pTableQueryInfo->cur.vgroupIndex == -1) { tVariantAssign(&pTableQueryInfo->tag, pTag); - STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQuery->vgId, &pTableQueryInfo->tag); + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQueryAttr->vgId, &pTableQueryInfo->tag); // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem)) { if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qError("QInfo:%"PRIu64" failed to find tag:%s in ts_comp", GET_QID(pRuntimeEnv), pTag->pz); + qError("QInfo:0x%"PRIx64" failed to find tag:%s in ts_comp", GET_QID(pRuntimeEnv), pTag->pz); } else { - qError("QInfo:%"PRIu64" failed to find tag:%" PRId64 " in ts_comp", GET_QID(pRuntimeEnv), pTag->i64); + qError("QInfo:0x%"PRIx64" failed to find tag:%" PRId64 " in ts_comp", GET_QID(pRuntimeEnv), pTag->i64); } return -1; @@ -3396,34 +3352,34 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, // Keep the cursor info of current table pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:0x%"PRIx64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { - qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:0x%"PRIx64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } } else { tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:0x%"PRIx64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { - qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:0x%"PRIx64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } } return 0; } -void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr) { - SQuery* pQuery = pRuntimeEnv->pQuery; +void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) { + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t numOfExprs = pQuery->numOfOutput; + 
int32_t numOfExprs = pQueryAttr->numOfOutput; for(int32_t i = 0; i < numOfExprs; ++i) { - SExprInfo* pExprInfo = &(pExpr[i]); - if (pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) { + SExprInfo* pExprInfo1 = &(pExprInfo[i]); + if (pExprInfo1->base.functionId != TSDB_FUNC_STDDEV_DST) { continue; } - SSqlFuncMsg* pFuncMsg = &pExprInfo->base; + SSqlExpr* pExpr = &pExprInfo1->base; pCtx[i].param[0].arr = NULL; pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int @@ -3432,11 +3388,11 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult); for (int32_t j = 0; j < numOfGroup; ++j) { SInterResult* p = taosArrayGet(pRuntimeEnv->prevResult, j); - if (pQuery->tagLen == 0 || memcmp(p->tags, pRuntimeEnv->tagVal, pQuery->tagLen) == 0) { + if (pQueryAttr->tagLen == 0 || memcmp(p->tags, pRuntimeEnv->tagVal, pQueryAttr->tagLen) == 0) { int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult); for (int32_t k = 0; k < numOfCols; ++k) { SStddevInterResult* pres = taosArrayGet(p->pResult, k); - if (pres->colId == pFuncMsg->colInfo.colId) { + if (pres->colId == pExpr->colInfo.colId) { pCtx[i].param[0].arr = pres->pResult; break; } @@ -3448,17 +3404,15 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t numOfExprs = pQuery->numOfOutput; + int32_t numOfExprs = pQueryAttr->numOfOutput; for(int32_t i = 0; i < numOfExprs; ++i) { - SExprInfo* pExprInfo = &(pExpr[i]); - if (pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) { + SSqlExpr* pExpr1 = &pExpr[i].base; + if (pExpr1->functionId != TSDB_FUNC_STDDEV_DST) { continue; } - SSqlFuncMsg* pFuncMsg = &pExprInfo->base; - pCtx[i].param[0].arr = NULL; pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int @@ -3470,7 +3424,7 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult); for (int32_t k = 0; k < numOfCols; ++k) { SStddevInterResult* pres = taosArrayGet(p->pResult, k); - if (pres->colId == pFuncMsg->colInfo.colId) { + if (pres->colId == pExpr1->colInfo.colId) { pCtx[i].param[0].arr = pres->pResult; break; } @@ -3478,23 +3432,20 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction } } } - } - - /* * There are two cases to handle: * - * 1. Query range is not set yet (queryRangeSet = 0). we need to set the query range info, including pQuery->lastKey, - * pQuery->window.skey, and pQuery->eKey. + * 1. Query range is not set yet (queryRangeSet = 0). we need to set the query range info, including pQueryAttr->lastKey, + * pQueryAttr->window.skey, and pQueryAttr->eKey. * 2. Query range is set and query is in progress. There may be another result with the same query ranges to be * merged during merge stage. In this case, we need the pTableQueryInfo->lastResRows to decide if there * is a previous result generated or not. 
*/ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { - SQuery *pQuery = pRuntimeEnv->pQuery; - STableQueryInfo *pTableQueryInfo = pQuery->current; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; + STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; SResultRowInfo *pWindowResInfo = &pTableQueryInfo->resInfo; if (pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL) { @@ -3502,7 +3453,7 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { } pTableQueryInfo->win.skey = key; - STimeWindow win = {.skey = key, .ekey = pQuery->window.ekey}; + STimeWindow win = {.skey = key, .ekey = pQueryAttr->window.ekey}; /** * In handling the both ascending and descending order super table query, we need to find the first qualified @@ -3514,11 +3465,11 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { TSKEY sk = MIN(win.skey, win.ekey); TSKEY ek = MAX(win.skey, win.ekey); - getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &w); + getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w); if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { - if (!QUERY_IS_ASC_QUERY(pQuery)) { - assert(win.ekey == pQuery->window.ekey); + if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { + assert(win.ekey == pQueryAttr->window.ekey); } pWindowResInfo->prevSKey = w.skey; @@ -3538,13 +3489,15 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { */ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) { + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); int32_t numOfResult = pBlock->info.rows; // there are already exists result rows int32_t start = 0; int32_t step = -1; - qDebug("QInfo:%"PRIu64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv)); + qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv)); if (orderType == TSDB_ORDER_ASC) { start = pGroupResInfo->index; step = 1; @@ -3572,7 +3525,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* int32_t bytes = pColInfoData->info.bytes; char *out = pColInfoData->pData + numOfResult * bytes; - char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes); + char *in = getPosInResultPage(pQueryAttr, page, pRow->offset, offset); memcpy(out, in, bytes * numOfRowsToCopy); offset += bytes; @@ -3584,7 +3537,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* } } - qDebug("QInfo:%"PRIu64" copy data to query buf completed", GET_QID(pRuntimeEnv)); + qDebug("QInfo:0x%"PRIx64" copy data to query buf completed", GET_QID(pRuntimeEnv)); pBlock->info.rows = numOfResult; return 0; } @@ -3597,10 +3550,11 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti return; } - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSDB_ORDER_ASC; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t orderType = (pQueryAttr->pGroupbyExpr != NULL) ? 
pQueryAttr->pGroupbyExpr->orderType : TSDB_ORDER_ASC; doCopyToSDataBlock(pRuntimeEnv, pGroupResInfo, orderType, pBlock); + // refactor : extract method SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0); if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { @@ -3610,12 +3564,12 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti } } -static void updateNumOfRowsInResultRows(SQueryRuntimeEnv *pRuntimeEnv, - SQLFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset) { - SQuery *pQuery = pRuntimeEnv->pQuery; +static void updateNumOfRowsInResultRows(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, + SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset) { + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; // update the number of result for each, only update the number of rows for the corresponding window result. - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { return; } @@ -3636,18 +3590,18 @@ static void updateNumOfRowsInResultRows(SQueryRuntimeEnv *pRuntimeEnv, static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data) { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SSDataBlock* pRes = pRuntimeEnv->outputBuf; - if (pQuery->pExpr2 == NULL) { - for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { + if (pQueryAttr->pExpr2 == NULL) { + for (int32_t col = 0; col < pQueryAttr->numOfOutput; ++col) { SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows); data += pColRes->info.bytes * pRes->info.rows; } } else { - for (int32_t col = 0; col < pQuery->numOfExpr2; ++col) { + for (int32_t col = 0; col < pQueryAttr->numOfExpr2; ++col) { SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows); data += pColRes->info.bytes * numOfRows; @@ -3670,11 +3624,11 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data data += sizeof(STableIdInfo); total++; - qDebug("QInfo:%"PRIu64" set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo->qId, item->tid, item->uid, item->key); + qDebug("QInfo:0x%"PRIx64" set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo->qId, item->tid, item->uid, item->key); item = taosHashIterate(pRuntimeEnv->pTableRetrieveTsMap, item); } - qDebug("QInfo:%"PRIu64" set %d subscribe info", pQInfo->qId, total); + qDebug("QInfo:0x%"PRIx64" set %d subscribe info", pQInfo->qId, total); // Check if query is completed or not for stable query or normal table query respectively. 
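One change that recurs throughout these hunks is the switch of every qDebug/qError trace from printing the query id with %"PRIu64" to 0x%"PRIx64", i.e. from decimal to hexadecimal, presumably so the id is easier to correlate across log lines. The formatting itself is plain <inttypes.h> usage, shown here in isolation:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t qId = 0x1a2b3c4d5e6fULL;                  /* arbitrary example id */
  printf("QInfo:%" PRIu64 " decimal, as before\n", qId);
  printf("QInfo:0x%" PRIx64 " hex, as after the change\n", qId);
  return 0;
}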
if (Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) && pRuntimeEnv->proot->status == OP_EXEC_DONE) { setQueryStatus(pRuntimeEnv, QUERY_OVER); @@ -3713,62 +3667,62 @@ void queryCostStatis(SQInfo *pQInfo) { pSummary->numOfTimeWindows = 0; } - qDebug("QInfo:%"PRIu64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " + qDebug("QInfo:0x%"PRIx64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " "load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); - qDebug("QInfo:%"PRIu64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, + qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); } //static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { -// SQuery *pQuery = pRuntimeEnv->pQuery; -// STableQueryInfo* pTableQueryInfo = pQuery->current; +// SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; +// STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; // -// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); +// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); // -// if (pQuery->limit.offset == pBlockInfo->rows) { // current block will ignore completed -// pTableQueryInfo->lastKey = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->window.ekey + step : pBlockInfo->window.skey + step; -// pQuery->limit.offset = 0; +// if (pQueryAttr->limit.offset == pBlockInfo->rows) { // current block will ignore completed +// pTableQueryInfo->lastKey = QUERY_IS_ASC_QUERY(pQueryAttr) ? 
pBlockInfo->window.ekey + step : pBlockInfo->window.skey + step; +// pQueryAttr->limit.offset = 0; // return; // } // -// if (QUERY_IS_ASC_QUERY(pQuery)) { -// pQuery->pos = (int32_t)pQuery->limit.offset; +// if (QUERY_IS_ASC_QUERY(pQueryAttr)) { +// pQueryAttr->pos = (int32_t)pQueryAttr->limit.offset; // } else { -// pQuery->pos = pBlockInfo->rows - (int32_t)pQuery->limit.offset - 1; +// pQueryAttr->pos = pBlockInfo->rows - (int32_t)pQueryAttr->limit.offset - 1; // } // -// assert(pQuery->pos >= 0 && pQuery->pos <= pBlockInfo->rows - 1); +// assert(pQueryAttr->pos >= 0 && pQueryAttr->pos <= pBlockInfo->rows - 1); // // SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); // SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); // -// // update the pQuery->limit.offset value, and pQuery->pos value +// // update the pQueryAttr->limit.offset value, and pQueryAttr->pos value // TSKEY *keys = (TSKEY *) pColInfoData->pData; // // // update the offset value -// pTableQueryInfo->lastKey = keys[pQuery->pos]; -// pQuery->limit.offset = 0; +// pTableQueryInfo->lastKey = keys[pQueryAttr->pos]; +// pQueryAttr->limit.offset = 0; // // int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); // -// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QID(pRuntimeEnv), +// qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QID(pRuntimeEnv), // pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQuery->current->lastKey); //} //void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { -// SQuery *pQuery = pRuntimeEnv->pQuery; +// SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; // -// if (pQuery->limit.offset <= 0 || pQuery->numOfFilterCols > 0) { +// if (pQueryAttr->limit.offset <= 0 || pQueryAttr->numOfFilterCols > 0) { // return; // } // -// pQuery->pos = 0; -// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); +// pQueryAttr->pos = 0; +// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); // -// STableQueryInfo* pTableQueryInfo = pQuery->current; +// STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; // TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; // // SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; @@ -3779,12 +3733,12 @@ void queryCostStatis(SQInfo *pQInfo) { // // tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); // -// if (pQuery->limit.offset > blockInfo.rows) { -// pQuery->limit.offset -= blockInfo.rows; -// pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey; +// if (pQueryAttr->limit.offset > blockInfo.rows) { +// pQueryAttr->limit.offset -= blockInfo.rows; +// pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? 
blockInfo.window.ekey : blockInfo.window.skey; // pTableQueryInfo->lastKey += step; // -// qDebug("QInfo:%"PRIu64" skip rows:%d, offset:%" PRId64, GET_QID(pRuntimeEnv), blockInfo.rows, +// qDebug("QInfo:0x%"PRIx64" skip rows:%d, offset:%" PRId64, GET_QID(pRuntimeEnv), blockInfo.rows, // pQuery->limit.offset); // } else { // find the appropriated start position in current block // updateOffsetVal(pRuntimeEnv, &blockInfo); @@ -3798,15 +3752,15 @@ void queryCostStatis(SQInfo *pQInfo) { //} //static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* win, SDataBlockInfo* pBlockInfo, STableQueryInfo* pTableQueryInfo) { -// SQuery *pQuery = pRuntimeEnv->pQuery; +// SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; // SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; // -// assert(pQuery->limit.offset == 0); +// assert(pQueryAttr->limit.offset == 0); // STimeWindow tw = *win; -// getNextTimeWindow(pQuery, &tw); +// getNextTimeWindow(pQueryAttr, &tw); // -// if ((tw.skey <= pBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || -// (tw.ekey >= pBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { +// if ((tw.skey <= pBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) || +// (tw.ekey >= pBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQueryAttr))) { // // // load the data block and check data remaining in current data block // // TODO optimize performance @@ -3815,15 +3769,15 @@ void queryCostStatis(SQInfo *pQInfo) { // // tw = *win; // int32_t startPos = -// getNextQualifiedWindow(pQuery, &tw, pBlockInfo, pColInfoData->pData, binarySearchForKey, -1); +// getNextQualifiedWindow(pQueryAttr, &tw, pBlockInfo, pColInfoData->pData, binarySearchForKey, -1); // assert(startPos >= 0); // // // set the abort info -// pQuery->pos = startPos; +// pQueryAttr->pos = startPos; // // // reset the query start timestamp // pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; -// pQuery->window.skey = pTableQueryInfo->win.skey; +// pQueryAttr->window.skey = pTableQueryInfo->win.skey; // TSKEY key = pTableQueryInfo->win.skey; // // pWindowResInfo->prevSKey = tw.skey; @@ -3832,13 +3786,13 @@ void queryCostStatis(SQInfo *pQInfo) { // int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); // pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index // -// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, +// qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, // GET_QID(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, -// pQuery->current->lastKey); +// pQueryAttr->current->lastKey); // // return key; // } else { // do nothing -// pQuery->window.skey = tw.skey; +// pQueryAttr->window.skey = tw.skey; // pWindowResInfo->prevSKey = tw.skey; // pTableQueryInfo->lastKey = tw.skey; // @@ -3849,53 +3803,53 @@ void queryCostStatis(SQInfo *pQInfo) { //} //static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { -// SQuery *pQuery = pRuntimeEnv->pQuery; -// if (QUERY_IS_ASC_QUERY(pQuery)) { -// assert(*start <= pQuery->current->lastKey); +// SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; +// if (QUERY_IS_ASC_QUERY(pQueryAttr)) { +// assert(*start <= pRuntimeEnv->current->lastKey); // } else { -// assert(*start >= pQuery->current->lastKey); +// assert(*start >= pRuntimeEnv->current->lastKey); // } // 
// // if queried with value filter, do NOT forward query start position -// if (pQuery->limit.offset <= 0 || pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->pFillInfo != NULL) { +// if (pQueryAttr->limit.offset <= 0 || pQueryAttr->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->pFillInfo != NULL) { // return true; // } // // /* -// * 1. for interval without interpolation query we forward pQuery->interval.interval at a time for -// * pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is -// * not valid. otherwise, we only forward pQuery->limit.offset number of points +// * 1. for interval without interpolation query we forward pQueryAttr->interval.interval at a time for +// * pQueryAttr->limit.offset times. Since hole exists, pQueryAttr->interval.interval*pQueryAttr->limit.offset value is +// * not valid. otherwise, we only forward pQueryAttr->limit.offset number of points // */ // assert(pRuntimeEnv->resultRowInfo.prevSKey == TSKEY_INITIAL_VAL); // // STimeWindow w = TSWINDOW_INITIALIZER; -// bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); +// bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); // // SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; -// STableQueryInfo *pTableQueryInfo = pQuery->current; +// STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; // // SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; // while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { // tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo); // -// if (QUERY_IS_ASC_QUERY(pQuery)) { +// if (QUERY_IS_ASC_QUERY(pQueryAttr)) { // if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { -// getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w); +// getAlignQueryTimeWindow(pQueryAttr, blockInfo.window.skey, blockInfo.window.skey, pQueryAttr->window.ekey, &w); // pWindowResInfo->prevSKey = w.skey; // } // } else { -// getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w); +// getAlignQueryTimeWindow(pQueryAttr, blockInfo.window.ekey, pQueryAttr->window.ekey, blockInfo.window.ekey, &w); // pWindowResInfo->prevSKey = w.skey; // } // // // the first time window -// STimeWindow win = getActiveTimeWindow(pWindowResInfo, pWindowResInfo->prevSKey, pQuery); +// STimeWindow win = getActiveTimeWindow(pWindowResInfo, pWindowResInfo->prevSKey, pQueryAttr); // -// while (pQuery->limit.offset > 0) { +// while (pQueryAttr->limit.offset > 0) { // STimeWindow tw = win; // // if ((win.ekey <= blockInfo.window.ekey && ascQuery) || (win.ekey >= blockInfo.window.skey && !ascQuery)) { -// pQuery->limit.offset -= 1; +// pQueryAttr->limit.offset -= 1; // pWindowResInfo->prevSKey = win.skey; // // // current time window is aligned with blockInfo.window.ekey @@ -3905,13 +3859,13 @@ void queryCostStatis(SQInfo *pQInfo) { // } // } // -// if (pQuery->limit.offset == 0) { +// if (pQueryAttr->limit.offset == 0) { // *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); // return true; // } // // // current window does not ended in current data block, try next data block -// getNextTimeWindow(pQuery, &tw); +// getNextTimeWindow(pQueryAttr, &tw); // // /* // * If the next time window still starts from current data block, @@ -3926,20 +3880,20 @@ void queryCostStatis(SQInfo *pQInfo) { // SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); // // if ((win.ekey > blockInfo.window.ekey && ascQuery) || 
(win.ekey < blockInfo.window.skey && !ascQuery)) { -// pQuery->limit.offset -= 1; +// pQueryAttr->limit.offset -= 1; // } // -// if (pQuery->limit.offset == 0) { +// if (pQueryAttr->limit.offset == 0) { // *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); // return true; // } else { // tw = win; // int32_t startPos = -// getNextQualifiedWindow(pQuery, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); +// getNextQualifiedWindow(pQueryAttr, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); // assert(startPos >= 0); // // // set the abort info -// pQuery->pos = startPos; +// pQueryAttr->pos = startPos; // pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; // pWindowResInfo->prevSKey = tw.skey; // win = tw; @@ -3960,26 +3914,25 @@ void queryCostStatis(SQInfo *pQInfo) { static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo); -static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { - SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; +static int32_t setupQueryHandle(void* tsdb, SQueryRuntimeEnv* pRuntimeEnv, int64_t qId, bool isSTableQuery) { + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; // TODO set the tags scan handle - if (onlyQueryTags(pQuery)) { + if (onlyQueryTags(pQueryAttr)) { return TSDB_CODE_SUCCESS; } - STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window); - if (isTsCompQuery(pQuery) || isPointInterpoQuery(pQuery)) { + STsdbQueryCond cond = createTsdbQueryCond(pQueryAttr, &pQueryAttr->window); + if (pQueryAttr->tsCompQuery || pQueryAttr->pointInterpQuery) { cond.type = BLOCK_LOAD_TABLE_SEQ_ORDER; } if (!isSTableQuery && (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 1) && (cond.order == TSDB_ORDER_ASC) - && (!QUERY_IS_INTERVAL_QUERY(pQuery)) - && (!isGroupbyColumn(pQuery->pGroupbyExpr)) - && (!isFixedOutputQuery(pQuery)) + && (!QUERY_IS_INTERVAL_QUERY(pQueryAttr)) + && (!pQueryAttr->groupbyColumn) + && (!pQueryAttr->simpleAgg) ) { SArray* pa = GET_TABLEGROUP(pRuntimeEnv, 0); STableQueryInfo* pCheckInfo = taosArrayGetP(pa, 0); @@ -3987,12 +3940,12 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) } terrno = TSDB_CODE_SUCCESS; - if (isFirstLastRowQuery(pQuery)) { - pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef); + if (isFirstLastRowQuery(pQueryAttr)) { + pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQueryAttr->tableGroupInfo, qId, &pQueryAttr->memRef); // update the query time window - pQuery->window = cond.twindow; - if (pQuery->tableGroupInfo.numOfTables == 0) { + pQueryAttr->window = cond.twindow; + if (pQueryAttr->tableGroupInfo.numOfTables == 0) { pRuntimeEnv->tableqinfoGroupInfo.numOfTables = 0; } else { size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pRuntimeEnv); @@ -4003,15 +3956,15 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) for (int32_t j = 0; j < t; ++j) { STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); - pCheckInfo->win = pQuery->window; + pCheckInfo->win = pQueryAttr->window; pCheckInfo->lastKey = pCheckInfo->win.skey; } } } - } else if (isPointInterpoQuery(pQuery)) { - pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef); + } else if (pQueryAttr->pointInterpQuery) { + pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, 
&pQueryAttr->tableGroupInfo, qId, &pQueryAttr->memRef); } else { - pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef); + pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQueryAttr->tableGroupInfo, qId, &pQueryAttr->memRef); } return terrno; @@ -4028,82 +3981,92 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in for(int32_t i = 0; i < numOfOutput; ++i) { SExprInfo* pExprInfo = &pExpr[i]; - pFillCol[i].col.bytes = pExprInfo->bytes; - pFillCol[i].col.type = (int8_t)pExprInfo->type; + pFillCol[i].col.bytes = pExprInfo->base.resBytes; + pFillCol[i].col.type = (int8_t)pExprInfo->base.resType; pFillCol[i].col.offset = offset; + pFillCol[i].col.colId = pExprInfo->base.resColId; pFillCol[i].tagIndex = -2; - pFillCol[i].flag = TSDB_COL_NORMAL; // always be ta normal column for table query + pFillCol[i].flag = pExprInfo->base.colInfo.flag; // always be the normal column for table query pFillCol[i].functionId = pExprInfo->base.functionId; pFillCol[i].fillVal.i = fillVal[i]; - offset += pExprInfo->bytes; + offset += pExprInfo->base.resBytes; } return pFillCol; } -int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { +int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, SArray* prevResult, void* tsdb, void* sourceOptr, int32_t tbScanner, + SArray* pOperator, void* param) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - pQuery->tsdb = tsdb; + SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; + pQueryAttr->tsdb = tsdb; - pQuery->topBotQuery = isTopBottomQuery(pQuery); - pQuery->hasTagResults = hasTagValOutput(pQuery); - pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery); - pQuery->stabledev = isStabledev(pQuery); + pRuntimeEnv->prevResult = prevResult; - setScanLimitationByResultBuffer(pQuery); - - int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); - if (code != TSDB_CODE_SUCCESS) { - return code; + if (tsdb != NULL) { + int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } - pQuery->tsdb = tsdb; - pQuery->vgId = vgId; - pQuery->stableQuery = isSTableQuery; - pQuery->groupbyColumn = isGroupbyColumn(pQuery->pGroupbyExpr); - pQuery->interBufSize = getOutputInterResultBufSize(pQuery); + pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr); - pRuntimeEnv->groupResInfo.totalGroup = (int32_t) (isSTableQuery? GET_NUM_OF_TABLEGROUP(pRuntimeEnv):0); + pRuntimeEnv->groupResInfo.totalGroup = (int32_t) (pQueryAttr->stableQuery? 
GET_NUM_OF_TABLEGROUP(pRuntimeEnv):0); - pRuntimeEnv->pQuery = pQuery; + pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pTsBuf = pTsBuf; pRuntimeEnv->cur.vgroupIndex = -1; - setResultBufSize(pQuery, &pRuntimeEnv->resultInfo); - - if (onlyQueryTags(pQuery)) { - pRuntimeEnv->resultInfo.capacity = 4096; - pRuntimeEnv->proot = createTagScanOperatorInfo(pRuntimeEnv, pQuery->pExpr1, pQuery->numOfOutput); - } else if (pQuery->queryBlockDist) { - pRuntimeEnv->pTableScanner = createTableBlockInfoScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); - } else if (isTsCompQuery(pQuery) || isPointInterpoQuery(pQuery)) { - pRuntimeEnv->pTableScanner = createTableSeqScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); - } else if (needReverseScan(pQuery)) { - pRuntimeEnv->pTableScanner = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQuery), 1); - } else { - pRuntimeEnv->pTableScanner = createTableScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQuery)); + setResultBufSize(pQueryAttr, &pRuntimeEnv->resultInfo); + + switch(tbScanner) { + case OP_TableBlockInfoScan: { + pRuntimeEnv->proot = createTableBlockInfoScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); + break; + } + case OP_TableSeqScan: { + pRuntimeEnv->proot = createTableSeqScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); + break; + } + case OP_DataBlocksOptScan: { + pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), 1); + break; + } + case OP_TableScan: { + pRuntimeEnv->proot = createTableScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr)); + break; + } + default: { // do nothing + break; + } + } + + if (sourceOptr != NULL) { + assert(pRuntimeEnv->proot == NULL); + pRuntimeEnv->proot = sourceOptr; } if (pTsBuf != NULL) { - int16_t order = (pQuery->order.order == pRuntimeEnv->pTsBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; + int16_t order = (pQueryAttr->order.order == pRuntimeEnv->pTsBuf->tsOrder) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; tsBufSetTraverseOrder(pRuntimeEnv->pTsBuf, order); } int32_t ps = DEFAULT_PAGE_SIZE; - getIntermediateBufInfo(pRuntimeEnv, &ps, &pQuery->intermediateResultRowSize); + getIntermediateBufInfo(pRuntimeEnv, &ps, &pQueryAttr->intermediateResultRowSize); int32_t TENMB = 1024*1024*10; - code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo->qId); + int32_t code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo->qId); if (code != TSDB_CODE_SUCCESS) { return code; } // create runtime environment - int32_t numOfTables = (int32_t)pQuery->tableGroupInfo.numOfTables; + int32_t numOfTables = (int32_t)pQueryAttr->tableGroupInfo.numOfTables; pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo)); - code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQuery->tableGroupInfo.numOfTables); + + code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4112,25 +4075,25 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo return TSDB_CODE_SUCCESS; } -static void doTableQueryInfoTimeWindowCheck(SQuery* pQuery, STableQueryInfo* pTableQueryInfo) { - if (QUERY_IS_ASC_QUERY(pQuery)) { +static void doTableQueryInfoTimeWindowCheck(SQueryAttr* pQueryAttr, STableQueryInfo* pTableQueryInfo) { + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { assert( (pTableQueryInfo->win.skey <= pTableQueryInfo->win.ekey) && (pTableQueryInfo->lastKey >= pTableQueryInfo->win.skey) && - (pTableQueryInfo->win.skey >= pQuery->window.skey && pTableQueryInfo->win.ekey <= pQuery->window.ekey)); + (pTableQueryInfo->win.skey >= pQueryAttr->window.skey && pTableQueryInfo->win.ekey <= pQueryAttr->window.ekey)); } else { assert( (pTableQueryInfo->win.skey >= pTableQueryInfo->win.ekey) && (pTableQueryInfo->lastKey <= pTableQueryInfo->win.skey) && - (pTableQueryInfo->win.skey <= pQuery->window.skey && pTableQueryInfo->win.ekey >= pQuery->window.ekey)); + (pTableQueryInfo->win.skey <= pQueryAttr->window.skey && pTableQueryInfo->win.ekey >= pQueryAttr->window.ekey)); } } -STsdbQueryCond createTsdbQueryCond(SQuery* pQuery, STimeWindow* win) { +STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* win) { STsdbQueryCond cond = { - .colList = pQuery->colList, - .order = pQuery->order.order, - .numOfCols = pQuery->numOfCols, + .colList = pQueryAttr->tableCols, + .order = pQueryAttr->order.order, + .numOfCols = pQueryAttr->numOfCols, .type = BLOCK_LOAD_OFFSET_SEQ_ORDER, .loadExternalRows = false, }; @@ -4177,13 +4140,16 @@ static void doCloseAllTimeWindow(SQueryRuntimeEnv* pRuntimeEnv) { } } -static SSDataBlock* doTableScanImpl(void* param) { +static SSDataBlock* doTableScanImpl(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; STableScanInfo* pTableScanInfo = pOperator->info; - SSDataBlock* pBlock = &pTableScanInfo->block; - SQuery* pQuery = pOperator->pRuntimeEnv->pQuery; - STableGroupInfo* pTableGroupInfo = &pOperator->pRuntimeEnv->tableqinfoGroupInfo; + SSDataBlock* pBlock = &pTableScanInfo->block; + SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + STableGroupInfo *pTableGroupInfo = &pOperator->pRuntimeEnv->tableqinfoGroupInfo; + + *newgroup = false; while (tsdbNextDataBlock(pTableScanInfo->pQueryHandle)) { if (isQueryKilled(pOperator->pRuntimeEnv->qinfo)) { @@ -4194,15 +4160,15 @@ static SSDataBlock* doTableScanImpl(void* 
param) { tsdbRetrieveDataBlockInfo(pTableScanInfo->pQueryHandle, &pBlock->info); // todo opt - if (pTableGroupInfo->numOfTables > 1 || (pQuery->current == NULL && pTableGroupInfo->numOfTables == 1)) { + if (pTableGroupInfo->numOfTables > 1 || (pRuntimeEnv->current == NULL && pTableGroupInfo->numOfTables == 1)) { STableQueryInfo** pTableQueryInfo = (STableQueryInfo**)taosHashGet(pTableGroupInfo->map, &pBlock->info.tid, sizeof(pBlock->info.tid)); if (pTableQueryInfo == NULL) { break; } - pQuery->current = *pTableQueryInfo; - doTableQueryInfoTimeWindowCheck(pQuery, *pTableQueryInfo); + pRuntimeEnv->current = *pTableQueryInfo; + doTableQueryInfoTimeWindowCheck(pQueryAttr, *pTableQueryInfo); } // this function never returns error? @@ -4223,17 +4189,18 @@ static SSDataBlock* doTableScanImpl(void* param) { return NULL; } -static SSDataBlock* doTableScan(void* param) { +static SSDataBlock* doTableScan(void* param, bool *newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; STableScanInfo *pTableScanInfo = pOperator->info; SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv; - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SResultRowInfo* pResultRowInfo = pTableScanInfo->pResultRowInfo; + *newgroup = false; while (pTableScanInfo->current < pTableScanInfo->times) { - SSDataBlock* p = doTableScanImpl(pOperator); + SSDataBlock* p = doTableScanImpl(pOperator, newgroup); if (p != NULL) { return p; } @@ -4247,7 +4214,7 @@ static SSDataBlock* doTableScan(void* param) { } // do prepare for the next round table scan operation - STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window); + STsdbQueryCond cond = createTsdbQueryCond(pQueryAttr, &pQueryAttr->window); tsdbResetQueryHandle(pTableScanInfo->pQueryHandle, &cond); setQueryStatus(pRuntimeEnv, QUERY_NOT_COMPLETED); @@ -4263,17 +4230,18 @@ static SSDataBlock* doTableScan(void* param) { pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey; } - qDebug("QInfo:%"PRIu64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, + qDebug("QInfo:0x%"PRIx64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey); } + SSDataBlock *p = NULL; if (pTableScanInfo->reverseTimes > 0) { setupEnvForReverseScan(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput); - STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window); + STsdbQueryCond cond = createTsdbQueryCond(pQueryAttr, &pQueryAttr->window); tsdbResetQueryHandle(pTableScanInfo->pQueryHandle, &cond); - qDebug("QInfo:%"PRIu64" start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, + qDebug("QInfo:0x%"PRIx64" start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey); pRuntimeEnv->scanFlag = REVERSE_SCAN; @@ -4288,22 +4256,20 @@ static SSDataBlock* doTableScan(void* param) { pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->size-1]->win.skey; } - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; - } + p = doTableScanImpl(pOperator, newgroup); } - return NULL; + return p; } -static SSDataBlock* doBlockInfoScan(void* param) { +static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) { SOperatorInfo *pOperator = (SOperatorInfo*)param; if (pOperator->status == OP_EXEC_DONE) { return NULL; } 
STableScanInfo *pTableScanInfo = pOperator->info; + *newgroup = false; STableBlockDist tableBlockDist = {0}; tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables; @@ -4329,7 +4295,7 @@ static SSDataBlock* doBlockInfoScan(void* param) { tbufCloseWriter(&bw); SArray* g = GET_TABLEGROUP(pOperator->pRuntimeEnv, 0); - pOperator->pRuntimeEnv->pQuery->current = taosArrayGetP(g, 0); + pOperator->pRuntimeEnv->current = taosArrayGetP(g, 0); pOperator->status = OP_EXEC_DONE; return pBlock; @@ -4342,15 +4308,16 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->times = repeatTime; pInfo->reverseTimes = 0; - pInfo->order = pRuntimeEnv->pQuery->order.order; + pInfo->order = pRuntimeEnv->pQueryAttr->order.order; pInfo->current = 0; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "TableScanOperator"; + pOperator->operatorType = OP_TableScan; pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->info = pInfo; - pOperator->numOfOutput = pRuntimeEnv->pQuery->numOfCols; + pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols; pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->exec = doTableScan; @@ -4363,7 +4330,7 @@ SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeE pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->times = 1; pInfo->reverseTimes = 0; - pInfo->order = pRuntimeEnv->pQuery->order.order; + pInfo->order = pRuntimeEnv->pQueryAttr->order.order; pInfo->current = 0; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); @@ -4372,7 +4339,7 @@ SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeE pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->info = pInfo; - pOperator->numOfOutput = pRuntimeEnv->pQuery->numOfCols; + pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols; pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->exec = doTableScanImpl; @@ -4398,7 +4365,7 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu pOperator->status = OP_IN_EXECUTING; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->numOfOutput = pRuntimeEnv->pQuery->numOfCols; + pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols; pOperator->exec = doBlockInfoScan; return pOperator; @@ -4454,7 +4421,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf } } -static SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime) { +SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime) { assert(repeatTime > 0); STableScanInfo* pInfo = calloc(1, sizeof(STableScanInfo)); @@ -4462,7 +4429,7 @@ static SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQuery pInfo->times = repeatTime; pInfo->reverseTimes = reverseTime; pInfo->current = 0; - pInfo->order = pRuntimeEnv->pQuery->order.order; + pInfo->order = pRuntimeEnv->pQueryAttr->order.order; SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo)); pOptr->name = "DataBlocksOptimizedScanOperator"; @@ -4475,12 +4442,206 @@ static SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQuery return pOptr; } +SArray* getOrderCheckColumns(SQueryAttr* pQuery) { + int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols; + + SArray* 
pOrderColumns = NULL; + if (numOfCols > 0) { + pOrderColumns = taosArrayDup(pQuery->pGroupbyExpr->columnInfo); + } else { + pOrderColumns = taosArrayInit(4, sizeof(SColIndex)); + } + + if (pQuery->interval.interval > 0) { + if (pOrderColumns == NULL) { + pOrderColumns = taosArrayInit(1, sizeof(SColIndex)); + } + + SColIndex colIndex = {.colIndex = 0, .colId = 0, .flag = TSDB_COL_NORMAL}; + taosArrayPush(pOrderColumns, &colIndex); + } + + { + numOfCols = (int32_t) taosArrayGetSize(pOrderColumns); + for(int32_t i = 0; i < numOfCols; ++i) { + SColIndex* index = taosArrayGet(pOrderColumns, i); + for(int32_t j = 0; j < pQuery->numOfOutput; ++j) { + if (index->colId == pQuery->pExpr1[j].base.colInfo.colId) { + index->colIndex = j; + index->colId = pQuery->pExpr1[j].base.resColId; + } + } + } + } + return pOrderColumns; +} + +SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) { + int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols; + + SArray* pOrderColumns = NULL; + if (numOfCols > 0) { + pOrderColumns = taosArrayDup(pQuery->pGroupbyExpr->columnInfo); + } else { + pOrderColumns = taosArrayInit(4, sizeof(SColIndex)); + } + + for(int32_t i = 0; i < numOfCols; ++i) { + SColIndex* index = taosArrayGet(pOrderColumns, i); + + bool found = false; + for(int32_t j = 0; j < pQuery->numOfOutput; ++j) { + SSqlExpr* pExpr = &pQuery->pExpr1[j].base; + + // TSDB_FUNC_TAG_DUMMY function needs to be ignored + if (index->colId == pExpr->colInfo.colId && + ((TSDB_COL_IS_TAG(pExpr->colInfo.flag) && pExpr->functionId == TSDB_FUNC_TAG) || + (TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && pExpr->functionId == TSDB_FUNC_PRJ))) { + index->colIndex = j; + index->colId = pExpr->resColId; + found = true; + break; + } + } + + assert(found && index->colIndex >= 0 && index->colIndex < pQuery->numOfOutput); + } + + return pOrderColumns; +} + +static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) { + SMultiwayMergeInfo *pInfo = (SMultiwayMergeInfo*) param; + destroyBasicOperatorInfo(&pInfo->binfo, numOfOutput); + + taosArrayDestroy(pInfo->orderColumnList); + taosArrayDestroy(pInfo->groupColumnList); + tfree(pInfo->prevRow); + tfree(pInfo->currentGroupColData); +} + +static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) { + SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param; + taosArrayDestroy(pInfo->orderColumnList); + tfree(pInfo->prevRow); +} + +SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, + SExprInfo* pExpr, int32_t numOfOutput, void* param) { + SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); + + pInfo->resultRowFactor = + (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, + false)); + + pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx + + pInfo->pMerge = param; + pInfo->bufCapacity = 4096; + + pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor); + pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); + + pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr); + pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr); + + // TODO refactor + int32_t len = 0; + for(int32_t i = 0; i < numOfOutput; ++i) { + len += pExpr[i].base.colBytes; + } + + int32_t numOfCols = (pInfo->orderColumnList != NULL)? 
(int32_t) taosArrayGetSize(pInfo->orderColumnList):0; + pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len)); + int32_t offset = POINTER_BYTES * numOfCols; + + for(int32_t i = 0; i < numOfCols; ++i) { + pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; + + SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[index->colIndex].base.resBytes; + } + + numOfCols = (pInfo->groupColumnList != NULL)? (int32_t)taosArrayGetSize(pInfo->groupColumnList):0; + pInfo->currentGroupColData = calloc(1, (POINTER_BYTES * numOfCols + len)); + offset = POINTER_BYTES * numOfCols; + + for(int32_t i = 0; i < numOfCols; ++i) { + pInfo->currentGroupColData[i] = (char*)pInfo->currentGroupColData + offset; + + SColIndex* index = taosArrayGet(pInfo->groupColumnList, i); + offset += pExpr[index->colIndex].base.resBytes; + } + + initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + + pInfo->seed = rand(); + setDefaultOutputBuf(pRuntimeEnv, &pInfo->binfo, pInfo->seed, MERGE_STAGE); + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "GlobalAggregate"; + pOperator->operatorType = OP_GlobalAggregate; + pOperator->blockingOptr = true; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->upstream = upstream; + pOperator->pExpr = pExpr; + pOperator->numOfOutput = numOfOutput; + pOperator->pRuntimeEnv = pRuntimeEnv; + + pOperator->exec = doGlobalAggregate; + pOperator->cleanup = destroyGlobalAggOperatorInfo; + return pOperator; +} + +SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SExprInfo *pExpr, int32_t numOfOutput, + int32_t numOfRows, void *merger, bool groupMix) { + SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); + + pInfo->pMerge = merger; + pInfo->groupMix = groupMix; + pInfo->bufCapacity = numOfRows; + + pInfo->orderColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr); + pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows); + + { + int32_t len = 0; + for(int32_t i = 0; i < numOfOutput; ++i) { + len += pExpr[i].base.colBytes; + } + + int32_t numOfCols = (pInfo->orderColumnList != NULL)? 
(int32_t) taosArrayGetSize(pInfo->orderColumnList):0; + pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len)); + int32_t offset = POINTER_BYTES * numOfCols; + + for(int32_t i = 0; i < numOfCols; ++i) { + pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; + + SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[index->colIndex].base.colBytes; + } + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "MultiwaySortOperator"; + pOperator->operatorType = OP_MultiwayMergeSort; + pOperator->blockingOptr = false; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols; + pOperator->exec = doMultiwayMergeSort; + pOperator->cleanup = destroyGlobalAggOperatorInfo; + return pOperator; +} + static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { return pTableScanInfo->order; } // this is a blocking operator -static SSDataBlock* doAggregate(void* param) { +static SSDataBlock* doAggregate(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4491,18 +4652,20 @@ static SSDataBlock* doAggregate(void* param) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t order = pQuery->order.order; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t order = pQueryAttr->order.order; SOperatorInfo* upstream = pOperator->upstream; while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } - setTagValue(pOperator, pQuery->current->pTable, pInfo->pCtx, pOperator->numOfOutput); + if (pRuntimeEnv->current != NULL) { + setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfOutput); + } if (upstream->operatorType == OP_DataBlocksOptScan) { STableScanInfo* pScanInfo = upstream->info; @@ -4511,7 +4674,7 @@ static SSDataBlock* doAggregate(void* param) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - doAggregateImpl(pOperator, pQuery->window.skey, pInfo->pCtx, pBlock); + doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock); } pOperator->status = OP_EXEC_DONE; @@ -4523,7 +4686,7 @@ static SSDataBlock* doAggregate(void* param) { return pInfo->pRes; } -static SSDataBlock* doSTableAggregate(void* param) { +static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4544,18 +4707,18 @@ static SSDataBlock* doSTableAggregate(void* param) { return pInfo->pRes; } - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t order = pQuery->order.order; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t order = pQueryAttr->order.order; SOperatorInfo* upstream = pOperator->upstream; while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } - setTagValue(pOperator, pRuntimeEnv->pQuery->current->pTable, pInfo->pCtx, pOperator->numOfOutput); + setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfOutput); if (upstream->operatorType == OP_DataBlocksOptScan) { STableScanInfo* pScanInfo = upstream->info; @@ -4565,9 +4728,9 @@ static SSDataBlock* 
doSTableAggregate(void* param) {
 
     // the pDataBlock are always the same one, no need to call this again
     setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
 
-    TSKEY key = QUERY_IS_ASC_QUERY(pQuery)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1;
-    setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pQuery->current->groupIndex, key);
-    doAggregateImpl(pOperator, pQuery->window.skey, pInfo->pCtx, pBlock);
+    TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1;
+    setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key);
+    doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
   }
 
   pOperator->status = OP_RES_TO_RETURN;
@@ -4586,7 +4749,7 @@ static SSDataBlock* doSTableAggregate(void* param) {
   return pInfo->pRes;
 }
 
-static SSDataBlock* doArithmeticOperation(void* param) {
+static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
   SOperatorInfo* pOperator = (SOperatorInfo*) param;
 
   SArithOperatorInfo* pArithInfo = pOperator->info;
@@ -4594,28 +4757,75 @@ static SSDataBlock* doArithmeticOperation(void* param) {
   SOptrBasicInfo *pInfo = &pArithInfo->binfo;
 
   SSDataBlock* pRes = pInfo->pRes;
-  int32_t order = pRuntimeEnv->pQuery->order.order;
+  int32_t order = pRuntimeEnv->pQueryAttr->order.order;
 
   pRes->info.rows = 0;
 
+  if (pArithInfo->existDataBlock) {  // TODO refactor
+    STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
+
+    SSDataBlock* pBlock = pArithInfo->existDataBlock;
+    pArithInfo->existDataBlock = NULL;
+    *newgroup = true;
+
+    // todo dynamic set tags
+    if (pTableQueryInfo != NULL) {
+      setTagValue(pOperator, pTableQueryInfo->pTable, pInfo->pCtx, pOperator->numOfOutput);
+    }
+
+    // the pDataBlock are always the same one, no need to call this again
+    setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
+    updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
+
+    arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+
+    if (pTableQueryInfo != NULL) { // TODO refactor
+      updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
+    }
+
+    pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+    if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) {
+      clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
+      return pRes;
+    }
+  }
+
   while(1) {
-    SSDataBlock* pBlock = pOperator->upstream->exec(pOperator->upstream);
+    bool prevVal = *newgroup;
+
+    // The upstream exec may change the value of *newgroup, so use a local variable instead.
+    SSDataBlock* pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup);
     if (pBlock == NULL) {
+      assert(*newgroup == false);
+
+      *newgroup = prevVal;
       setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
       break;
     }
 
-    STableQueryInfo* pTableQueryInfo = pRuntimeEnv->pQuery->current;
+    // Return the result of the previous group first.
+    if (*newgroup && pRes->info.rows > 0) {
+      pArithInfo->existDataBlock = pBlock;
+      clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
+      return pInfo->pRes;
+    }
+
+    STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
 
     // todo dynamic set tags
-    setTagValue(pOperator, pTableQueryInfo->pTable, pInfo->pCtx, pOperator->numOfOutput);
+    if (pTableQueryInfo != NULL) {
+      setTagValue(pOperator, pTableQueryInfo->pTable, pInfo->pCtx, pOperator->numOfOutput);
+    }
 
     // the pDataBlock are always the same one, no need to call this again
     setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
-    updateOutputBuf(pArithInfo, pBlock->info.rows);
+    updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
 
     arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
-    updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
+
+    if (pTableQueryInfo != NULL) { // TODO refactor
+      updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
+    }
 
     pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
     if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) {
@@ -4627,54 +4837,26 @@ static SSDataBlock* doArithmeticOperation(void* param) {
   return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
 }
 
-static SSDataBlock* doLimit(void* param) {
-  SOperatorInfo* pOperator = (SOperatorInfo*) param;
+static SSDataBlock* doLimit(void* param, bool* newgroup) {
+  SOperatorInfo* pOperator = (SOperatorInfo*)param;
   if (pOperator->status == OP_EXEC_DONE) {
     return NULL;
   }
 
   SLimitOperatorInfo* pInfo = pOperator->info;
-
-  SSDataBlock* pBlock = pOperator->upstream->exec(pOperator->upstream);
-  if (pBlock == NULL) {
-    setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
-    pOperator->status = OP_EXEC_DONE;
-    return NULL;
-  }
-
-  if (pInfo->total + pBlock->info.rows >= pInfo->limit) {
-    pBlock->info.rows = (int32_t) (pInfo->limit - pInfo->total);
-
-    pInfo->total = pInfo->limit;
-
-    setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
-    pOperator->status = OP_EXEC_DONE;
-  } else {
-    pInfo->total += pBlock->info.rows;
-  }
-
-  return pBlock;
-}
-
-// TODO add log
-static SSDataBlock* doOffset(void* param) {
-  SOperatorInfo *pOperator = (SOperatorInfo *)param;
-  if (pOperator->status == OP_EXEC_DONE) {
-    return NULL;
-  }
-
   SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+  SSDataBlock* pBlock = NULL;
   while (1) {
-    SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream);
+    pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup);
     if (pBlock == NULL) {
-      setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+      setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
       pOperator->status = OP_EXEC_DONE;
       return NULL;
     }
 
     if (pRuntimeEnv->currentOffset == 0) {
-      return pBlock;
+      break;
     } else if (pRuntimeEnv->currentOffset >= pBlock->info.rows) {
       pRuntimeEnv->currentOffset -= pBlock->info.rows;
     } else {
@@ -4682,16 +4864,28 @@ static SSDataBlock* doOffset(void* param) {
       pBlock->info.rows = remain;
 
       for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
-        SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+        SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
 
         int16_t bytes = pColInfoData->info.bytes;
         memmove(pColInfoData->pData, pColInfoData->pData + bytes * pRuntimeEnv->currentOffset, remain * bytes);
       }
 
       pRuntimeEnv->currentOffset = 0;
-      return pBlock;
+      break;
     }
   }
+
+  if (pInfo->total + pBlock->info.rows >= pInfo->limit) {
+    pBlock->info.rows = (int32_t)(pInfo->limit -
pInfo->total); + pInfo->total = pInfo->limit; + + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + } else { + pInfo->total += pBlock->info.rows; + } + + return pBlock; } @@ -4707,7 +4901,7 @@ bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem return false; } } - + if (fp(filterElem, input, input, p->info.type)) { return true; } @@ -4715,91 +4909,36 @@ bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem return false; } - -void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) { - SHavingOperatorInfo* pInfo = pOperator->info; - int32_t f = 0; - int32_t allQualified = 1; - int32_t exprQualified = 0; - - for (int32_t r = 0; r < pBlock->info.rows; ++r) { - allQualified = 1; - - for (int32_t i = 0; i < pOperator->numOfOutput; ++i) { - SExprInfo* pExprInfo = &(pOperator->pExpr[i]); - if (pExprInfo->pFilter == NULL) { - continue; - } - - SArray* es = taosArrayGetP(pInfo->fp, i); - assert(es); - - size_t fpNum = taosArrayGetSize(es); - - exprQualified = 0; - for (int32_t m = 0; m < fpNum; ++m) { - __filter_func_t fp = taosArrayGetP(es, m); - - assert(fp); - - //SColIndex* colIdx = &pExprInfo->base.colInfo; - SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i); - - SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]}; - - if (doFilterData(p, r, &filterElem, fp)) { - exprQualified = 1; - break; - } - } - - if (exprQualified == 0) { - allQualified = 0; - break; - } - } - - if (allQualified == 0) { - continue; - } - - for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { - SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i); - - int16_t bytes = pColInfoData->info.bytes; - memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes); - } - - ++f; - } - - pBlock->info.rows = f; -} - -static SSDataBlock* doHaving(void* param) { +static SSDataBlock* doFilter(void* param, bool* newgroup) { SOperatorInfo *pOperator = (SOperatorInfo *)param; if (pOperator->status == OP_EXEC_DONE) { return NULL; } + SFilterOperatorInfo* pCondInfo = pOperator->info; SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; while (1) { - SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream); + SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); if (pBlock == NULL) { - setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); - pOperator->status = OP_EXEC_DONE; - return NULL; + break; } - - doHavingImpl(pOperator, pBlock); - return pBlock; + doSetFilterColumnInfo(pCondInfo->pFilterInfo, pCondInfo->numOfFilterCols, pBlock); + assert(pRuntimeEnv->pTsBuf == NULL); + filterRowsInDataBlock(pRuntimeEnv, pCondInfo->pFilterInfo, pCondInfo->numOfFilterCols, pBlock, true); + + if (pBlock->info.rows > 0) { + return pBlock; + } } -} + setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; +} -static SSDataBlock* doIntervalAgg(void* param) { +static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4818,26 +4957,26 @@ static SSDataBlock* doIntervalAgg(void* param) { return pIntervalInfo->pRes; } - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t order = pQuery->order.order; - STimeWindow win = pQuery->window; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t order = pQueryAttr->order.order; + STimeWindow win = pQueryAttr->window; SOperatorInfo* upstream = pOperator->upstream; 
while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQuery->order.order); + setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order); hashIntervalAgg(pOperator, &pIntervalInfo->resultRowInfo, pBlock, 0); } // restore the value - pQuery->order.order = order; - pQuery->window = win; + pQueryAttr->order.order = order; + pQueryAttr->window = win; pOperator->status = OP_RES_TO_RETURN; closeAllResultRows(&pIntervalInfo->resultRowInfo); @@ -4854,7 +4993,7 @@ static SSDataBlock* doIntervalAgg(void* param) { return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes; } -static SSDataBlock* doSTableIntervalAgg(void* param) { +static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4872,29 +5011,29 @@ static SSDataBlock* doSTableIntervalAgg(void* param) { return pIntervalInfo->pRes; } - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t order = pQuery->order.order; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t order = pQueryAttr->order.order; SOperatorInfo* upstream = pOperator->upstream; while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } // the pDataBlock are always the same one, no need to call this again - STableQueryInfo* pTableQueryInfo = pRuntimeEnv->pQuery->current; + STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; setTagValue(pOperator, pTableQueryInfo->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput); - setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQuery->order.order); + setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order); setIntervalQueryRange(pRuntimeEnv, pBlock->info.window.skey); hashIntervalAgg(pOperator, &pTableQueryInfo->resInfo, pBlock, pTableQueryInfo->groupIndex); } pOperator->status = OP_RES_TO_RETURN; - pQuery->order.order = order; // TODO : restore the order + pQueryAttr->order.order = order; // TODO : restore the order doCloseAllTimeWindow(pRuntimeEnv); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); @@ -4906,7 +5045,7 @@ static SSDataBlock* doSTableIntervalAgg(void* param) { return pIntervalInfo->pRes; } -static SSDataBlock* doSessionWindowAgg(void* param) { +static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4926,26 +5065,26 @@ static SSDataBlock* doSessionWindowAgg(void* param) { return pBInfo->pRes; } - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t order = pQuery->order.order; - STimeWindow win = pQuery->window; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t order = pQueryAttr->order.order; + STimeWindow win = pQueryAttr->window; SOperatorInfo* upstream = pOperator->upstream; while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, pQuery->order.order); + setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, pQueryAttr->order.order); doSessionWindowAggImpl(pOperator, 
pWindowInfo, pBlock); } // restore the value - pQuery->order.order = order; - pQuery->window = win; + pQueryAttr->order.order = order; + pQueryAttr->window = win; pOperator->status = OP_RES_TO_RETURN; closeAllResultRows(&pBInfo->resultRowInfo); @@ -4962,7 +5101,7 @@ static SSDataBlock* doSessionWindowAgg(void* param) { return pBInfo->pRes->info.rows == 0? NULL:pBInfo->pRes; } -static SSDataBlock* hashGroupbyAggregate(void* param) { +static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4984,16 +5123,16 @@ static SSDataBlock* hashGroupbyAggregate(void* param) { SOperatorInfo* upstream = pOperator->upstream; while(1) { - SSDataBlock* pBlock = upstream->exec(upstream); + SSDataBlock* pBlock = upstream->exec(upstream, newgroup); if (pBlock == NULL) { break; } // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pRuntimeEnv->pQuery->order.order); - setTagValue(pOperator, pRuntimeEnv->pQuery->current->pTable, pInfo->binfo.pCtx, pOperator->numOfOutput); + setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pRuntimeEnv->pQueryAttr->order.order); + setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->binfo.pCtx, pOperator->numOfOutput); if (pInfo->colIndex == -1) { - pInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQuery->pGroupbyExpr, pBlock); + pInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQueryAttr->pGroupbyExpr, pBlock); } doHashGroupbyAgg(pOperator, pInfo, pBlock); @@ -5003,7 +5142,7 @@ static SSDataBlock* hashGroupbyAggregate(void* param) { closeAllResultRows(&pInfo->binfo.resultRowInfo); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); - if (!pRuntimeEnv->pQuery->stableQuery) { // finalize include the update of result rows + if (!pRuntimeEnv->pQueryAttr->stableQuery) { // finalize include the update of result rows finalizeQueryResult(pOperator, pInfo->binfo.pCtx, &pInfo->binfo.resultRowInfo, pInfo->binfo.rowCellInfoOffset); } else { updateNumOfRowsInResultRows(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, &pInfo->binfo.resultRowInfo, pInfo->binfo.rowCellInfoOffset); @@ -5019,47 +5158,96 @@ static SSDataBlock* hashGroupbyAggregate(void* param) { return pInfo->binfo.pRes; } -static SSDataBlock* doFill(void* param) { +static SSDataBlock* doFill(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; } SFillOperatorInfo *pInfo = pOperator->info; - SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; + SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv; if (taosFillHasMoreResults(pInfo->pFillInfo)) { + *newgroup = false; doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity); return pInfo->pRes; } + // handle the cached new group data block + if (pInfo->existNewGroupBlock) { + pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; + int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; + taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); + + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); + + doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); + 
pInfo->existNewGroupBlock = NULL; + *newgroup = true; + return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; + } + while(1) { - SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream); - if (pBlock == NULL) { - if (pInfo->totalInputRows == 0) { - pOperator->status = OP_EXEC_DONE; - return NULL; - } + SSDataBlock* pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (*newgroup) { + assert(pBlock != NULL); + } - taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQuery->window.ekey); + if (*newgroup && pInfo->totalInputRows > 0) { // there are already processed current group data block + pInfo->existNewGroupBlock = pBlock; + *newgroup = false; + + // fill the previous group data block + // before handle a new data block, close the fill operation for previous group data block + taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey); } else { - pInfo->totalInputRows += pBlock->info.rows; + if (pBlock == NULL) { + if (pInfo->totalInputRows == 0) { + pOperator->status = OP_EXEC_DONE; + return NULL; + } - int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQuery->window.ekey:pBlock->info.window.ekey; + taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey); + } else { + pInfo->totalInputRows += pBlock->info.rows; + + int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey + : */pBlock->info.window.ekey; - taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pBlock); + taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pBlock); + } } doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); - return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; + if (pInfo->pRes->info.rows > 0) { // current group has no more result to return + return pInfo->pRes; + } else if (pInfo->existNewGroupBlock) { // try next group + pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; + int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey + :*/ pInfo->existNewGroupBlock->info.window.ekey; + taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); + + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); + + doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); + pInfo->existNewGroupBlock = NULL; + *newgroup = true; + + return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL; + } else { + return NULL; + } + // return (pInfo->pRes->info.rows > 0)? 
pInfo->pRes:NULL; } } // todo set the attribute of query scan count -static int32_t getNumOfScanTimes(SQuery* pQuery) { - for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pExpr1[i].base.functionId; +static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr) { + for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_STDDEV || functionId == TSDB_FUNC_PERCT) { return 2; } @@ -5082,11 +5270,11 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) { tfree(pOperator); } -static SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { +SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); - SQuery* pQuery = pRuntimeEnv->pQuery; - int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery)); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); @@ -5094,7 +5282,7 @@ static SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); pInfo->seed = rand(); - setDefaultOutputBuf(pRuntimeEnv, &pInfo->binfo, pInfo->seed); + setDefaultOutputBuf(pRuntimeEnv, &pInfo->binfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "TableAggregate"; @@ -5149,11 +5337,15 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } -static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) { - SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param; - if (pInfo->fp) { - taosArrayDestroy(pInfo->fp); - } +static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { + SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param; + doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols); +} + +static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) { + SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; + taosHashCleanup(pInfo->pSet); + pInfo->pRes = destroyOutputBuf(pInfo->pRes); } SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { @@ -5193,7 +5385,7 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pBInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pBInfo->rowCellInfoOffset); initResultRowInfo(&pBInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT); - setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed); + setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "ArithmeticOperator"; @@ -5212,86 +5404,59 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI return pOperator; } +SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, + int32_t 
numOfOutput) { + SFilterOperatorInfo* pInfo = calloc(1, sizeof(SFilterOperatorInfo)); -int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) { - __filter_func_t fp = NULL; + { + SColumnInfo* pCols = calloc(numOfOutput, sizeof(SColumnInfo)); - *fps = taosArrayInit(numOfOutput, sizeof(SArray*)); - if (*fps == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + int32_t numOfFilter = 0; + for(int32_t i = 0; i < numOfOutput; ++i) { + if (pExpr[i].base.flist.numOfFilters > 0) { + numOfFilter += 1; + } - for (int32_t i = 0; i < numOfOutput; ++i) { - SExprInfo* pExprInfo = &(pExpr[i]); - SColIndex* colIdx = &pExprInfo->base.colInfo; + pCols[i].type = pExpr[i].base.resType; + pCols[i].bytes = pExpr[i].base.resBytes; + pCols[i].colId = pExpr[i].base.resColId; - if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) { - taosArrayPush(*fps, &fp); - - continue; + pCols[i].flist.numOfFilters = pExpr[i].base.flist.numOfFilters; + pCols[i].flist.filterInfo = calloc(pCols[i].flist.numOfFilters, sizeof(SColumnFilterInfo)); + memcpy(pCols[i].flist.filterInfo, pExpr[i].base.flist.filterInfo, pCols[i].flist.numOfFilters * sizeof(SColumnFilterInfo)); } - int32_t filterNum = pExprInfo->base.filterNum; - SColumnFilterInfo *filterInfo = pExprInfo->pFilter; + assert(numOfFilter > 0); + doCreateFilterInfo(pCols, numOfOutput, numOfFilter, &pInfo->pFilterInfo, 0); + pInfo->numOfFilterCols = numOfFilter; - SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t)); - - for (int32_t j = 0; j < filterNum; ++j) { - int32_t lower = filterInfo->lowerRelOptr; - int32_t upper = filterInfo->upperRelOptr; - if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { - qError("invalid rel optr"); - taosArrayDestroy(es); - return TSDB_CODE_QRY_APP_ERROR; - } - - __filter_func_t ffp = getFilterOperator(lower, upper); - if (ffp == NULL) { - qError("invalid filter info"); - taosArrayDestroy(es); - return TSDB_CODE_QRY_APP_ERROR; - } - - taosArrayPush(es, &ffp); - - filterInfo += 1; + for(int32_t i = 0; i < numOfOutput; ++i) { + tfree(pCols[i].flist.filterInfo); } - taosArrayPush(*fps, &es); + tfree(pCols); } - return TSDB_CODE_SUCCESS; -} - -SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { - SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo)); - - initFilterFp(pExpr, numOfOutput, &pInfo->fp); - - assert(pInfo->fp); - SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); - pOperator->name = "HavingOperator"; - pOperator->operatorType = OP_Having; + pOperator->name = "ConditionOperator"; + pOperator->operatorType = OP_Filter; pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->numOfOutput = numOfOutput; pOperator->pExpr = pExpr; pOperator->upstream = upstream; - pOperator->exec = doHaving; + pOperator->exec = doFilter; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->cleanup = destroyHavingOperatorInfo; + pOperator->cleanup = destroyConditionOperatorInfo; return pOperator; } - - SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) { SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo)); - pInfo->limit = pRuntimeEnv->pQuery->limit.limit; + pInfo->limit = pRuntimeEnv->pQueryAttr->limit.limit; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); @@ -5307,24 +5472,6 @@ SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI return 
pOperator; } -SOperatorInfo* createOffsetOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) { - SOffsetOperatorInfo* pInfo = calloc(1, sizeof(SOffsetOperatorInfo)); - - pInfo->offset = pRuntimeEnv->pQuery->limit.offset; - SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); - - pOperator->name = "OffsetOperator"; - pOperator->operatorType = OP_Offset; - pOperator->blockingOptr = false; - pOperator->status = OP_IN_EXECUTING; - pOperator->upstream = upstream; - pOperator->exec = doOffset; - pOperator->info = pInfo; - pOperator->pRuntimeEnv = pRuntimeEnv; - - return pOperator; -} - SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo)); @@ -5428,17 +5575,18 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); { - SQuery* pQuery = pRuntimeEnv->pQuery; - SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfOutput, pQuery->fillVal); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfOutput, pQueryAttr->fillVal); STimeWindow w = TSWINDOW_INITIALIZER; - TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); - TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); - getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); + TSKEY sk = MIN(pQueryAttr->window.skey, pQueryAttr->window.ekey); + TSKEY ek = MAX(pQueryAttr->window.skey, pQueryAttr->window.ekey); + getAlignQueryTimeWindow(pQueryAttr, pQueryAttr->window.skey, sk, ek, &w); - pInfo->pFillInfo = taosCreateFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pRuntimeEnv->resultInfo.capacity, numOfOutput, - pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision, - pQuery->fillType, pColInfo, pRuntimeEnv->qinfo); + pInfo->pFillInfo = + taosCreateFillInfo(pQueryAttr->order.order, w.skey, 0, (int32_t)pRuntimeEnv->resultInfo.capacity, numOfOutput, + pQueryAttr->interval.sliding, pQueryAttr->interval.slidingUnit, + (int8_t)pQueryAttr->precision, pQueryAttr->fillType, pColInfo, pRuntimeEnv->qinfo); } SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); @@ -5460,7 +5608,50 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn return pOperator; } -static SSDataBlock* doTagScan(void* param) { +SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger) { + SSLimitOperatorInfo* pInfo = calloc(1, sizeof(SSLimitOperatorInfo)); + + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + + pInfo->orderColumnList = getResultGroupCheckColumns(pQueryAttr); + pInfo->slimit = pQueryAttr->slimit; + pInfo->limit = pQueryAttr->limit; + + pInfo->currentGroupOffset = pQueryAttr->slimit.offset; + pInfo->currentOffset = pQueryAttr->limit.offset; + + // TODO refactor + int32_t len = 0; + for(int32_t i = 0; i < numOfOutput; ++i) { + len += pExpr[i].base.resBytes; + } + + int32_t numOfCols = pInfo->orderColumnList != NULL? 
(int32_t) taosArrayGetSize(pInfo->orderColumnList):0; + pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len)); + int32_t offset = POINTER_BYTES * numOfCols; + + for(int32_t i = 0; i < numOfCols; ++i) { + pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; + + SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[index->colIndex].base.resBytes; + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + + pOperator->name = "SLimitOperator"; + pOperator->operatorType = OP_SLimit; + pOperator->blockingOptr = false; + pOperator->status = OP_IN_EXECUTING; + pOperator->upstream = upstream; + pOperator->exec = doSLimit; + pOperator->info = pInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + pOperator->cleanup = destroySlimitOperatorInfo; + return pOperator; +} + +static SSDataBlock* doTagScan(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -5472,27 +5663,28 @@ static SSDataBlock* doTagScan(void* param) { STagScanInfo *pInfo = pOperator->info; SSDataBlock *pRes = pInfo->pRes; + *newgroup = false; int32_t count = 0; SArray* pa = GET_TABLEGROUP(pRuntimeEnv, 0); int32_t functionId = pOperator->pExpr[0].base.functionId; if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id - SQuery* pQuery = pRuntimeEnv->pQuery; - assert(pQuery->numOfOutput == 1); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + assert(pQueryAttr->numOfOutput == 1); SExprInfo* pExprInfo = &pOperator->pExpr[0]; - int32_t rsize = pExprInfo->bytes; + int32_t rsize = pExprInfo->base.resBytes; count = 0; - int16_t bytes = pExprInfo->bytes; - int16_t type = pExprInfo->type; + int16_t bytes = pExprInfo->base.resBytes; + int16_t type = pExprInfo->base.resType; - for(int32_t i = 0; i < pQuery->numOfTags; ++i) { - if (pQuery->tagColList[i].colId == pExprInfo->base.colInfo.colId) { - bytes = pQuery->tagColList[i].bytes; - type = pQuery->tagColList[i].type; + for(int32_t i = 0; i < pQueryAttr->numOfTags; ++i) { + if (pQueryAttr->tagColList[i].colId == pExprInfo->base.colInfo.colId) { + bytes = pQueryAttr->tagColList[i].bytes; + type = pQueryAttr->tagColList[i].type; break; } } @@ -5518,8 +5710,8 @@ static SSDataBlock* doTagScan(void* param) { *(int32_t *)output = id->tid; output += sizeof(id->tid); - *(int32_t *)output = pQuery->vgId; - output += sizeof(pQuery->vgId); + *(int32_t *)output = pQueryAttr->vgId; + output += sizeof(pQueryAttr->vgId); char* data = NULL; if (pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { @@ -5532,14 +5724,14 @@ static SSDataBlock* doTagScan(void* param) { count += 1; } - qDebug("QInfo:%"PRIu64" create (tableId, tag) info completed, rows:%d", GET_QID(pRuntimeEnv), count); + qDebug("QInfo:0x%"PRIx64" create (tableId, tag) info completed, rows:%d", GET_QID(pRuntimeEnv), count); } else if (functionId == TSDB_FUNC_COUNT) {// handle the "count(tbname)" query SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0); *(int64_t*)pColInfo->pData = pInfo->totalTables; count = 1; pOperator->status = OP_EXEC_DONE; - qDebug("QInfo:%"PRIu64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count); + qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count); } else { // return only the tags|table name etc. 
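[Editor's sketch] The prevRow buffer built in createSLimitOperatorInfo above keeps an array of char* slots and the referenced row bytes inside one calloc'd block, so a single free releases both. A minimal, self-contained sketch of that layout pattern follows; allocPackedRow and colBytes are illustrative names chosen here, not TDengine APIs, and the per-column widths are assumed to be supplied by the caller.

#include <stdint.h>
#include <stdlib.h>

/* One allocation: numOfCols pointers up front, packed column bytes behind them.
 * Each pointer is aimed at its own slice of the trailing payload. */
static char **allocPackedRow(const int32_t *colBytes, int32_t numOfCols) {
  int32_t payload = 0;
  for (int32_t i = 0; i < numOfCols; ++i) {
    payload += colBytes[i];
  }

  char **row = calloc(1, sizeof(char *) * numOfCols + payload);
  if (row == NULL) {
    return NULL;
  }

  int32_t offset = (int32_t)(sizeof(char *) * numOfCols);
  for (int32_t i = 0; i < numOfCols; ++i) {
    row[i] = (char *)row + offset;  /* slice i starts inside the same block */
    offset += colBytes[i];
  }

  return row;  /* one free(row) releases pointers and payload together */
}

A caller would pass the per-column result widths gathered from the expression list, mirroring how the operator sums resBytes before allocating.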
SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo @@ -5558,8 +5750,8 @@ static SSDataBlock* doTagScan(void* param) { } SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, j); - type = pExprInfo[j].type; - bytes = pExprInfo[j].bytes; + type = pExprInfo[j].base.resType; + bytes = pExprInfo[j].base.resBytes; if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { data = tsdbGetTableName(item->pTable); @@ -5567,7 +5759,7 @@ static SSDataBlock* doTagScan(void* param) { data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.colInfo.colId, type, bytes); } - dst = pColInfo->pData + count * pExprInfo[j].bytes; + dst = pColInfo->pData + count * pExprInfo[j].base.resBytes; doSetTagValueToResultBuf(dst, data, type, bytes); } @@ -5578,7 +5770,7 @@ static SSDataBlock* doTagScan(void* param) { pOperator->status = OP_EXEC_DONE; } - qDebug("QInfo:%"PRIu64" create tag values results completed, rows:%d", GET_QID(pRuntimeEnv), count); + qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_QID(pRuntimeEnv), count); } pRes->info.rows = count; @@ -5610,30 +5802,112 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf return pOperator; } -static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) { +static SSDataBlock* hashDistinct(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SDistinctOperatorInfo* pInfo = pOperator->info; + SSDataBlock* pRes = pInfo->pRes; + + pRes->info.rows = 0; + SSDataBlock* pBlock = NULL; + while(1) { + pBlock = pOperator->upstream->exec(pOperator->upstream, newgroup); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; + } + + assert(pBlock->info.numOfCols == 1); + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0); + + int16_t bytes = pColInfoData->info.bytes; + int16_t type = pColInfoData->info.type; + + // ensure the output buffer size + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, 0); + if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { + int32_t newSize = pRes->info.rows + pBlock->info.rows; + char* tmp = realloc(pResultColInfoData->pData, newSize * bytes); + if (tmp == NULL) { + return NULL; + } else { + pResultColInfoData->pData = tmp; + pInfo->outputCapacity = newSize; + } + } + + for(int32_t i = 0; i < pBlock->info.rows; ++i) { + char* val = ((char*)pColInfoData->pData) + bytes * i; + if (isNull(val, type)) { + continue; + } + + void* res = taosHashGet(pInfo->pSet, val, bytes); + if (res == NULL) { + taosHashPut(pInfo->pSet, val, bytes, NULL, 0); + char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; + memcpy(start, val, bytes); + pRes->info.rows += 1; + } + } + + if (pRes->info.rows >= pInfo->threshold) { + break; + } + } + + return (pInfo->pRes->info.rows > 0)? 
pInfo->pRes:NULL; +} + +SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); + + pInfo->outputCapacity = 4096; + pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); + pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "DistinctOperator"; + pOperator->blockingOptr = false; + pOperator->status = OP_IN_EXECUTING; + pOperator->operatorType = OP_Distinct; + pOperator->upstream = upstream; + pOperator->numOfOutput = numOfOutput; + pOperator->info = pInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + pOperator->exec = hashDistinct; + pOperator->cleanup = destroyDistinctOperatorInfo; + return pOperator; +} + +static int32_t getColumnIndexInSource(SQueriedTableInfo *pTableInfo, SSqlExpr *pExpr, SColumnInfo* pTagCols) { int32_t j = 0; - if (TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { - if (pExprMsg->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + if (pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { return TSDB_TBNAME_COLUMN_INDEX; - } else if (pExprMsg->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { + } else if (pExpr->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { return TSDB_BLOCK_DIST_COLUMN_INDEX; } - while(j < pQueryMsg->numOfTags) { - if (pExprMsg->colInfo.colId == pTagCols[j].colId) { + while(j < pTableInfo->numOfTags) { + if (pExpr->colInfo.colId == pTagCols[j].colId) { return j; } j += 1; } - } else if (TSDB_COL_IS_UD_COL(pExprMsg->colInfo.flag)) { // user specified column data + } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { // user specified column data return TSDB_UD_COLUMN_INDEX; } else { - while (j < pQueryMsg->numOfCols) { - if (pExprMsg->colInfo.colId == pQueryMsg->colList[j].colId) { + while (j < pTableInfo->numOfCols) { + if (pExpr->colInfo.colId == pTableInfo->colList[j].colId) { return j; } @@ -5644,8 +5918,8 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE return INT32_MIN; // return a less than TSDB_TBNAME_COLUMN_INDEX value } -bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) { - int32_t j = getColumnIndexInSource(pQueryMsg, pExprMsg, pTagCols); +bool validateExprColumnInfo(SQueriedTableInfo *pTableInfo, SSqlExpr *pExpr, SColumnInfo* pTagCols) { + int32_t j = getColumnIndexInSource(pTableInfo, pExpr, pTagCols); return j != INT32_MIN; } @@ -5684,21 +5958,21 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) { return true; } -static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pExprMsg, SColumnInfo* pTagCols) { - int32_t numOfTotal = pQueryMsg->numOfCols + pQueryMsg->numOfTags; - if (pQueryMsg->numOfCols < 0 || pQueryMsg->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) { - qError("qmsg:%p illegal value of numOfCols %d numOfTags:%d", pQueryMsg, pQueryMsg->numOfCols, pQueryMsg->numOfTags); +static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput, + SColumnInfo* pTagCols, void* pMsg) { + int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags; + if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) { + qError("qmsg:%p illegal value of numOfCols %d numOfTags:%d", pMsg, 
pTableInfo->numOfCols, pTableInfo->numOfTags); return false; } - if (numOfTotal == 0) { - for(int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { - SSqlFuncMsg* pFuncMsg = pExprMsg[i]; - - if ((pFuncMsg->functionId == TSDB_FUNC_TAGPRJ) || - (pFuncMsg->functionId == TSDB_FUNC_TID_TAG && pFuncMsg->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) || - (pFuncMsg->functionId == TSDB_FUNC_COUNT && pFuncMsg->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) || - (pFuncMsg->functionId == TSDB_FUNC_BLKINFO)) { + if (numOfTotal == 0) { // table total columns are not required. + for(int32_t i = 0; i < numOfOutput; ++i) { + SSqlExpr* p = pExpr[i]; + if ((p->functionId == TSDB_FUNC_TAGPRJ) || + (p->functionId == TSDB_FUNC_TID_TAG && p->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) || + (p->functionId == TSDB_FUNC_COUNT && p->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) || + (p->functionId == TSDB_FUNC_BLKINFO)) { continue; } @@ -5706,8 +5980,8 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx } } - for(int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { - if (!validateExprColumnInfo(pQueryMsg, pExprMsg[i], pTagCols)) { + for(int32_t i = 0; i < numOfOutput; ++i) { + if (!validateExprColumnInfo(pTableInfo, pExpr[i], pTagCols)) { return TSDB_CODE_QRY_INVALID_MSG; } } @@ -5734,6 +6008,37 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p return pMsg; } +static int32_t deserializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t numOfFilters, char** pMsg) { + for (int32_t f = 0; f < numOfFilters; ++f) { + SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)(*pMsg); + + SColumnFilterInfo *pColFilter = &pColFilters[f]; + pColFilter->filterstr = htons(pFilterMsg->filterstr); + + (*pMsg) += sizeof(SColumnFilterInfo); + + if (pColFilter->filterstr) { + pColFilter->len = htobe64(pFilterMsg->len); + + pColFilter->pz = (int64_t)calloc(1, (size_t)(pColFilter->len + 1 * TSDB_NCHAR_SIZE)); // note: null-terminator + if (pColFilter->pz == 0) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy((void *)pColFilter->pz, (*pMsg), (size_t)pColFilter->len); + (*pMsg) += (pColFilter->len + 1); + } else { + pColFilter->lowerBndi = htobe64(pFilterMsg->lowerBndi); + pColFilter->upperBndi = htobe64(pFilterMsg->upperBndi); + } + + pColFilter->lowerRelOptr = htons(pFilterMsg->lowerRelOptr); + pColFilter->upperRelOptr = htons(pFilterMsg->upperRelOptr); + } + + return TSDB_CODE_SUCCESS; +} + /** * pQueryMsg->head has been converted before this function is called. 
* @@ -5750,7 +6055,6 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { } pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables); - pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey); pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey); pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval); @@ -5769,10 +6073,10 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput); pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols); pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen); - pQueryMsg->tsOffset = htonl(pQueryMsg->tsOffset); - pQueryMsg->tsLen = htonl(pQueryMsg->tsLen); - pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks); - pQueryMsg->tsOrder = htonl(pQueryMsg->tsOrder); + pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset); + pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); + pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); + pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder); pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags); pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen); pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput); @@ -5780,6 +6084,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen); pQueryMsg->sw.gap = htobe64(pQueryMsg->sw.gap); pQueryMsg->sw.primaryColId = htonl(pQueryMsg->sw.primaryColId); + pQueryMsg->tableScanOperator = htonl(pQueryMsg->tableScanOperator); + pQueryMsg->numOfOperator = htonl(pQueryMsg->numOfOperator); // query msg safety check if (!validateQueryMsg(pQueryMsg)) { @@ -5787,14 +6093,14 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { goto _cleanup; } - char *pMsg = (char *)(pQueryMsg->colList) + sizeof(SColumnInfo) * pQueryMsg->numOfCols; + char *pMsg = (char *)(pQueryMsg->tableCols) + sizeof(SColumnInfo) * pQueryMsg->numOfCols; for (int32_t col = 0; col < pQueryMsg->numOfCols; ++col) { - SColumnInfo *pColInfo = &pQueryMsg->colList[col]; + SColumnInfo *pColInfo = &pQueryMsg->tableCols[col]; pColInfo->colId = htons(pColInfo->colId); pColInfo->type = htons(pColInfo->type); pColInfo->bytes = htons(pColInfo->bytes); - pColInfo->numOfFilters = htons(pColInfo->numOfFilters); + pColInfo->flist.numOfFilters = htons(pColInfo->flist.numOfFilters); if (!isValidDataType(pColInfo->type)) { qDebug("qmsg:%p, invalid data type in source column, index:%d, type:%d", pQueryMsg, col, pColInfo->type); @@ -5802,102 +6108,57 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { goto _cleanup; } - int32_t numOfFilters = pColInfo->numOfFilters; + int32_t numOfFilters = pColInfo->flist.numOfFilters; if (numOfFilters > 0) { - pColInfo->filters = calloc(numOfFilters, sizeof(SColumnFilterInfo)); - if (pColInfo->filters == NULL) { + pColInfo->flist.filterInfo = calloc(numOfFilters, sizeof(SColumnFilterInfo)); + if (pColInfo->flist.filterInfo == NULL) { code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } } - for (int32_t f = 0; f < numOfFilters; ++f) { - SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pMsg; - - SColumnFilterInfo *pColFilter = &pColInfo->filters[f]; - pColFilter->filterstr = htons(pFilterMsg->filterstr); - - pMsg += sizeof(SColumnFilterInfo); - - if (pColFilter->filterstr) { - pColFilter->len = htobe64(pFilterMsg->len); - - pColFilter->pz = (int64_t)calloc(1, (size_t)(pColFilter->len + 1 * TSDB_NCHAR_SIZE)); // note: 
null-terminator - if (pColFilter->pz == 0) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _cleanup; - } - - memcpy((void *)pColFilter->pz, pMsg, (size_t)pColFilter->len); - pMsg += (pColFilter->len + 1); - } else { - pColFilter->lowerBndi = htobe64(pFilterMsg->lowerBndi); - pColFilter->upperBndi = htobe64(pFilterMsg->upperBndi); - } - - pColFilter->lowerRelOptr = htons(pFilterMsg->lowerRelOptr); - pColFilter->upperRelOptr = htons(pFilterMsg->upperRelOptr); + code = deserializeColFilterInfo(pColInfo->flist.filterInfo, numOfFilters, &pMsg); + if (code != TSDB_CODE_SUCCESS) { + goto _cleanup; } } - param->pExprMsg = calloc(pQueryMsg->numOfOutput, POINTER_BYTES); - if (param->pExprMsg == NULL) { + param->tableScanOperator = pQueryMsg->tableScanOperator; + param->pExpr = calloc(pQueryMsg->numOfOutput, POINTER_BYTES); + if (param->pExpr == NULL) { code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } - SSqlFuncMsg *pExprMsg = (SSqlFuncMsg *)pMsg; + SSqlExpr *pExprMsg = (SSqlExpr *)pMsg; for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { - param->pExprMsg[i] = pExprMsg; + param->pExpr[i] = pExprMsg; pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); - pExprMsg->colType = htons(pExprMsg->colType); pExprMsg->colBytes = htons(pExprMsg->colBytes); + pExprMsg->colType = htons(pExprMsg->colType); + + pExprMsg->resType = htons(pExprMsg->resType); + pExprMsg->resBytes = htons(pExprMsg->resBytes); pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); pExprMsg->resColId = htons(pExprMsg->resColId); - pExprMsg->filterNum = htonl(pExprMsg->filterNum); - - pMsg += sizeof(SSqlFuncMsg); - - SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo; - - pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum; - - for (int32_t f = 0; f < pExprMsg->filterNum; ++f) { - SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo; - - pFilterMsg->filterstr = htons(pFilterMsg->filterstr); - - if (pFilterMsg->filterstr) { - pFilterMsg->len = htobe64(pFilterMsg->len); - - pFilterMsg->pz = (int64_t)pMsg; - pMsg += (pFilterMsg->len + 1); - } else { - pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi); - pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi); - } - - pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr); - pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr); - - pExprFilterInfo++; - } + pExprMsg->flist.numOfFilters = htons(pExprMsg->flist.numOfFilters); + pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); - pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); + pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); - if (pExprMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { - pExprMsg->arg[j].argValue.pz = pMsg; - pMsg += pExprMsg->arg[j].argBytes; // one more for the string terminated char. + if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { + pExprMsg->param[j].pz = pMsg; + pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char. 
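[Editor's sketch] The deserialization walk in deserializeColFilterInfo and convertQueryMsg above converts every multi-byte field from network order (htons/htonl/htobe64) and then advances pMsg past the structure it just consumed. Below is a compact sketch of that cursor-and-byteswap pattern using a hypothetical SWireFilter wire struct (not a TDengine type, chosen so it has no padding); the be64ToHost helper is spelled out so the sketch does not rely on the non-portable htobe64/be64toh.

#include <arpa/inet.h>  /* ntohl */
#include <stdint.h>

typedef struct {
  int32_t colId;     /* big-endian on the wire */
  int32_t relOptr;   /* big-endian on the wire */
  int64_t lowerBnd;  /* big-endian on the wire */
} SWireFilter;        /* assumed to match the wire layout byte for byte */

/* Assemble a host-order value from 8 big-endian bytes, whatever the host endianness. */
static uint64_t be64ToHost(const void *p) {
  const uint8_t *b = p;
  uint64_t v = 0;
  for (int32_t i = 0; i < 8; ++i) {
    v = (v << 8) | b[i];
  }
  return v;
}

/* Fix one record up in place and move the cursor past it, as convertQueryMsg does with pMsg. */
static void deserializeWireFilter(char **cursor) {
  SWireFilter *f = (SWireFilter *)(*cursor);
  f->colId    = (int32_t)ntohl((uint32_t)f->colId);
  f->relOptr  = (int32_t)ntohl((uint32_t)f->relOptr);
  f->lowerBnd = (int64_t)be64ToHost(&f->lowerBnd);
  *cursor += sizeof(SWireFilter);
}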
} else { - pExprMsg->arg[j].argValue.i64 = htobe64(pExprMsg->arg[j].argValue.i64); + pExprMsg->param[j].i64 = htobe64(pExprMsg->param[j].i64); } } @@ -5909,36 +6170,43 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { } } - pExprMsg = (SSqlFuncMsg *)pMsg; + if (pExprMsg->flist.numOfFilters > 0) { + pExprMsg->flist.filterInfo = calloc(pExprMsg->flist.numOfFilters, sizeof(SColumnFilterInfo)); + } + + deserializeColFilterInfo(pExprMsg->flist.filterInfo, pExprMsg->flist.numOfFilters, &pMsg); + pExprMsg = (SSqlExpr *)pMsg; } if (pQueryMsg->secondStageOutput) { - pExprMsg = (SSqlFuncMsg *)pMsg; - param->pSecExprMsg = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); + pExprMsg = (SSqlExpr *)pMsg; + param->pSecExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) { - param->pSecExprMsg[i] = pExprMsg; + param->pSecExpr[i] = pExprMsg; pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); - pExprMsg->colType = htons(pExprMsg->colType); + pExprMsg->resType = htons(pExprMsg->resType); + pExprMsg->resBytes = htons(pExprMsg->resBytes); pExprMsg->colBytes = htons(pExprMsg->colBytes); + pExprMsg->colType = htons(pExprMsg->colType); pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); - pMsg += sizeof(SSqlFuncMsg); + pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); - pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); + pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); - if (pExprMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { - pExprMsg->arg[j].argValue.pz = pMsg; - pMsg += pExprMsg->arg[j].argBytes; // one more for the string terminated char. + if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { + pExprMsg->param[j].pz = pMsg; + pMsg += pExprMsg->param[j].nLen; // one more for the string terminated char. 
} else { - pExprMsg->arg[j].argValue.i64 = htobe64(pExprMsg->arg[j].argValue.i64); + pExprMsg->param[j].i64 = htobe64(pExprMsg->param[j].i64); } } @@ -5950,7 +6218,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { } } - pExprMsg = (SSqlFuncMsg *)pMsg; + pExprMsg = (SSqlExpr *)pMsg; } } @@ -6006,7 +6274,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pTagCol->colId = htons(pTagCol->colId); pTagCol->bytes = htons(pTagCol->bytes); pTagCol->type = htons(pTagCol->type); - pTagCol->numOfFilters = 0; + pTagCol->flist.numOfFilters = 0; param->pTagColumnInfo[i] = *pTagCol; pMsg += sizeof(SColumnInfo); @@ -6048,13 +6316,22 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { } //skip ts buf - if ((pQueryMsg->tsOffset + pQueryMsg->tsLen) > 0) { - pMsg = (char *)pQueryMsg + pQueryMsg->tsOffset + pQueryMsg->tsLen; + if ((pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen) > 0) { + pMsg = (char *)pQueryMsg + pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen; + } + + param->pOperator = taosArrayInit(pQueryMsg->numOfOperator, sizeof(int32_t)); + for(int32_t i = 0; i < pQueryMsg->numOfOperator; ++i) { + int32_t op = htonl(*(int32_t*)pMsg); + taosArrayPush(param->pOperator, &op); + + pMsg += sizeof(int32_t); } param->sql = strndup(pMsg, pQueryMsg->sqlstrLen); - if (!validateQuerySourceCols(pQueryMsg, param->pExprMsg, param->pTagColumnInfo)) { + SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols}; + if (!validateQueryTableCols(&info, param->pExpr, pQueryMsg->numOfOutput, param->pTagColumnInfo, pQueryMsg)) { code = TSDB_CODE_QRY_INVALID_MSG; goto _cleanup; } @@ -6063,7 +6340,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { "outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64, pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols, pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval, - pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset); + pQueryMsg->fillType, pQueryMsg->tsBuf.tsLen, pQueryMsg->tsBuf.tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset); qDebug("qmsg:%p, sql:%s", pQueryMsg, param->sql); return TSDB_CODE_SUCCESS; @@ -6073,75 +6350,75 @@ _cleanup: return code; } -int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { - if (filterNum <= 0) { - return TSDB_CODE_SUCCESS; - } + int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { + if (filterNum <= 0) { + return TSDB_CODE_SUCCESS; + } - *dst = calloc(filterNum, sizeof(*src)); - if (*dst == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } + *dst = calloc(filterNum, sizeof(*src)); + if (*dst == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } - memcpy(*dst, src, sizeof(*src) * filterNum); - - for (int32_t i = 0; i < filterNum; i++) { - if ((*dst)[i].filterstr && dst[i]->len > 0) { - void *pz = calloc(1, (size_t)(*dst)[i].len + 1); - - if (pz == NULL) { - if (i == 0) { - free(*dst); - } else { - freeColumnFilterInfo(*dst, i); - } + memcpy(*dst, src, sizeof(*src) * filterNum); - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } + for (int32_t i = 0; i < filterNum; i++) { + if ((*dst)[i].filterstr && dst[i]->len > 0) { + void *pz = calloc(1, 
(size_t)(*dst)[i].len + 1); - memcpy(pz, (void *)src->pz, (size_t)src->len + 1); + if (pz == NULL) { + if (i == 0) { + free(*dst); + } else { + freeColumnFilterInfo(*dst, i); + } - (*dst)[i].pz = (int64_t)pz; - } - } + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } - return TSDB_CODE_SUCCESS; -} + memcpy(pz, (void *)src->pz, (size_t)src->len + 1); + (*dst)[i].pz = (int64_t)pz; + } + } -static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { + return TSDB_CODE_SUCCESS; + } + +int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); tExprNode* pExprNode = NULL; TRY(TSDB_MAX_TAG_CONDITIONS) { - pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes); + pExprNode = exprTreeFromBinary(pExprInfo->base.param[0].pz, pExprInfo->base.param[0].nLen); } CATCH( code ) { CLEANUP_EXECUTE(); - qError("qmsg:%p failed to create arithmetic expression string from:%s, reason: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz, tstrerror(code)); + qError("qmsg:%p failed to create arithmetic expression string from:%s, reason: %s", pQueryMsg, pExprInfo->base.param[0].pz, tstrerror(code)); return code; } END_TRY if (pExprNode == NULL) { - qError("qmsg:%p failed to create arithmetic expression string from:%s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); + qError("qmsg:%p failed to create arithmetic expression string from:%s", pQueryMsg, pExprInfo->base.param[0].pz); return TSDB_CODE_QRY_APP_ERROR; } - pArithExprInfo->pExpr = pExprNode; + pExprInfo->pExpr = pExprNode; return TSDB_CODE_SUCCESS; } -static int32_t updateOutputBufForTopBotQuery(SQueryTableMsg* pQueryMsg, SColumnInfo* pTagCols, SExprInfo* pExprs, int32_t numOfOutput, int32_t tagLen, bool superTable) { + +static int32_t updateOutputBufForTopBotQuery(SQueriedTableInfo* pTableInfo, SColumnInfo* pTagCols, SExprInfo* pExprs, int32_t numOfOutput, int32_t tagLen, bool superTable) { for (int32_t i = 0; i < numOfOutput; ++i) { int16_t functId = pExprs[i].base.functionId; if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM) { - int32_t j = getColumnIndexInSource(pQueryMsg, &pExprs[i].base, pTagCols); - if (j < 0 || j >= pQueryMsg->numOfCols) { + int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols); + if (j < 0 || j >= pTableInfo->numOfCols) { return TSDB_CODE_QRY_INVALID_MSG; } else { - SColumnInfo* pCol = &pQueryMsg->colList[j]; - int32_t ret = getResultDataInfo(pCol->type, pCol->bytes, functId, (int32_t)pExprs[i].base.arg[0].argValue.i64, - &pExprs[i].type, &pExprs[i].bytes, &pExprs[i].interBytes, tagLen, superTable); + SColumnInfo* pCol = &pTableInfo->colList[j]; + int32_t ret = getResultDataInfo(pCol->type, pCol->bytes, functId, (int32_t)pExprs[i].base.param[0].i64, + &pExprs[i].base.resType, &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, tagLen, superTable); assert(ret == TSDB_CODE_SUCCESS); } } @@ -6151,29 +6428,33 @@ static int32_t updateOutputBufForTopBotQuery(SQueryTableMsg* pQueryMsg, SColumnI } // TODO tag length should be passed from client -int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutput, SExprInfo** pExprInfo, - SSqlFuncMsg** pExprMsg, SColumnInfo* pTagCols) { +int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo, + SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; - 
SExprInfo *pExprs = (SExprInfo *)calloc(pQueryMsg->numOfOutput, sizeof(SExprInfo)); + SExprInfo *pExprs = (SExprInfo *)calloc(numOfOutput, sizeof(SExprInfo)); if (pExprs == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - bool isSuperTable = QUERY_IS_STABLE_QUERY(pQueryMsg->queryType); + bool isSuperTable = QUERY_IS_STABLE_QUERY(queryType); int16_t tagLen = 0; for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; - pExprs[i].bytes = 0; + memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); + + for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) { + tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]); + } int16_t type = 0; int16_t bytes = 0; // parse the arithmetic expression if (pExprs[i].base.functionId == TSDB_FUNC_ARITHM) { - code = buildArithmeticExprFromMsg(&pExprs[i], pQueryMsg); + code = buildArithmeticExprFromMsg(&pExprs[i], pMsg); if (code != TSDB_CODE_SUCCESS) { tfree(pExprs); @@ -6190,30 +6471,29 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu SSchema s = tGetBlockDistColumnSchema(); type = s.type; bytes = s.bytes; - } else if (pExprs[i].base.colInfo.colId <= TSDB_UD_COLUMN_INDEX) { + } else if (pExprs[i].base.colInfo.colId <= TSDB_UD_COLUMN_INDEX && pExprs[i].base.colInfo.colId > TSDB_RES_COL_ID) { // it is a user-defined constant value column assert(pExprs[i].base.functionId == TSDB_FUNC_PRJ); - type = pExprs[i].base.arg[1].argType; - bytes = pExprs[i].base.arg[1].argBytes; - + type = pExprs[i].base.param[1].nType; + bytes = pExprs[i].base.param[1].nLen; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { bytes += VARSTR_HEADER_SIZE; } } else { - int32_t j = getColumnIndexInSource(pQueryMsg, &pExprs[i].base, pTagCols); + int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols); if (TSDB_COL_IS_TAG(pExprs[i].base.colInfo.flag)) { - if (j < TSDB_BLOCK_DIST_COLUMN_INDEX || j >= pQueryMsg->numOfTags) { + if (j < TSDB_BLOCK_DIST_COLUMN_INDEX || j >= pTableInfo->numOfTags) { return TSDB_CODE_QRY_INVALID_MSG; } } else { - if (j < PRIMARYKEY_TIMESTAMP_COL_INDEX || j >= pQueryMsg->numOfCols) { + if (j < PRIMARYKEY_TIMESTAMP_COL_INDEX || j >= pTableInfo->numOfCols) { return TSDB_CODE_QRY_INVALID_MSG; } } if (pExprs[i].base.colInfo.colId != TSDB_TBNAME_COLUMN_INDEX && j >= 0) { - SColumnInfo* pCol = (TSDB_COL_IS_TAG(pExprs[i].base.colInfo.flag))? &pTagCols[j]:&pQueryMsg->colList[j]; + SColumnInfo* pCol = (TSDB_COL_IS_TAG(pExprs[i].base.colInfo.flag))? 
&pTagCols[j]:&pTableInfo->colList[j]; type = pCol->type; bytes = pCol->bytes; } else { @@ -6223,43 +6503,45 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu bytes = s->bytes; } - if (pExprs[i].base.filterNum > 0) { - int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum); + if (pExprs[i].base.flist.numOfFilters > 0) { + int32_t ret = cloneExprFilterInfo(&pExprs[i].base.flist.filterInfo, pExprMsg[i]->flist.filterInfo, + pExprMsg[i]->flist.numOfFilters); if (ret) { return ret; } } } - int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64; + int32_t param = (int32_t)pExprs[i].base.param[0].i64; if (pExprs[i].base.functionId != TSDB_FUNC_ARITHM && (type != pExprs[i].base.colType || bytes != pExprs[i].base.colBytes)) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } - if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].type, &pExprs[i].bytes, - &pExprs[i].interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { + if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, + &pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } if (pExprs[i].base.functionId == TSDB_FUNC_TAG_DUMMY || pExprs[i].base.functionId == TSDB_FUNC_TS_DUMMY) { - tagLen += pExprs[i].bytes; + tagLen += pExprs[i].base.resBytes; } - assert(isValidDataType(pExprs[i].type)); + assert(isValidDataType(pExprs[i].base.resType)); } // the tag length is affected by other tag columns, so this should be update. - updateOutputBufForTopBotQuery(pQueryMsg, pTagCols, pExprs, numOfOutput, tagLen, isSuperTable); + updateOutputBufForTopBotQuery(pTableInfo, pTagCols, pExprs, numOfOutput, tagLen, isSuperTable); *pExprInfo = pExprs; return TSDB_CODE_SUCCESS; } -int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, - SSqlFuncMsg **pExprMsg, SExprInfo *prevExpr) { +// todo refactor +int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutput, SExprInfo** pExprInfo, + SSqlExpr** pExpr, SExprInfo* prevExpr) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -6271,8 +6553,14 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t nu bool isSuperTable = QUERY_IS_STABLE_QUERY(pQueryMsg->queryType); for (int32_t i = 0; i < numOfOutput; ++i) { - pExprs[i].base = *pExprMsg[i]; - pExprs[i].bytes = 0; + pExprs[i].base = *pExpr[i]; + memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); + + for (int32_t j = 0; j < pExpr[i]->numOfParams; ++j) { + tVariantAssign(&pExprs[i].base.param[j], &pExpr[i]->param[j]); + } + + pExprs[i].base.resType = 0; int16_t type = 0; int16_t bytes = 0; @@ -6292,18 +6580,18 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t nu int32_t index = pExprs[i].base.colInfo.colIndex; assert(prevExpr[index].base.resColId == pExprs[i].base.colInfo.colId); - type = prevExpr[index].type; - bytes = prevExpr[index].bytes; + type = prevExpr[index].base.resType; + bytes = prevExpr[index].base.resBytes; } - int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64; - if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].type, &pExprs[i].bytes, - &pExprs[i].interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { + int32_t param = (int32_t)pExprs[i].base.param[0].i64; + if (getResultDataInfo(type, bytes, 
pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, + &pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } - assert(isValidDataType(pExprs[i].type)); + assert(isValidDataType(pExprs[i].base.resType)); } *pExprInfo = pExprs; @@ -6334,53 +6622,44 @@ SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex * return pGroupbyExpr; } -static int32_t createFilterInfo(SQuery *pQuery, uint64_t qId) { - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - if (pQuery->colList[i].numOfFilters > 0) { - pQuery->numOfFilterCols++; - } - } - - if (pQuery->numOfFilterCols == 0) { - return TSDB_CODE_SUCCESS; - } - - pQuery->pFilterInfo = calloc(1, sizeof(SSingleColumnFilterInfo) * pQuery->numOfFilterCols); - if (pQuery->pFilterInfo == NULL) { +static int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, + SSingleColumnFilterInfo** pFilterInfo, uint64_t qId) { + *pFilterInfo = calloc(1, sizeof(SSingleColumnFilterInfo) * numOfFilterCols); + if (pFilterInfo == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - for (int32_t i = 0, j = 0; i < pQuery->numOfCols; ++i) { - if (pQuery->colList[i].numOfFilters > 0) { - SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[j]; + for (int32_t i = 0, j = 0; i < numOfCols; ++i) { + if (pCols[i].flist.numOfFilters > 0) { + SSingleColumnFilterInfo* pFilter = &((*pFilterInfo)[j]); - memcpy(&pFilterInfo->info, &pQuery->colList[i], sizeof(SColumnInfo)); - pFilterInfo->info = pQuery->colList[i]; + memcpy(&pFilter->info, &pCols[i], sizeof(SColumnInfo)); + pFilter->info = pCols[i]; - pFilterInfo->numOfFilters = pQuery->colList[i].numOfFilters; - pFilterInfo->pFilters = calloc(pFilterInfo->numOfFilters, sizeof(SColumnFilterElem)); - if (pFilterInfo->pFilters == NULL) { + pFilter->numOfFilters = pCols[i].flist.numOfFilters; + pFilter->pFilters = calloc(pFilter->numOfFilters, sizeof(SColumnFilterElem)); + if (pFilter->pFilters == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - for (int32_t f = 0; f < pFilterInfo->numOfFilters; ++f) { - SColumnFilterElem *pSingleColFilter = &pFilterInfo->pFilters[f]; - pSingleColFilter->filterInfo = pQuery->colList[i].filters[f]; + for (int32_t f = 0; f < pFilter->numOfFilters; ++f) { + SColumnFilterElem* pSingleColFilter = &pFilter->pFilters[f]; + pSingleColFilter->filterInfo = pCols[i].flist.filterInfo[f]; int32_t lower = pSingleColFilter->filterInfo.lowerRelOptr; int32_t upper = pSingleColFilter->filterInfo.upperRelOptr; if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { - qError("QInfo:%"PRIu64" invalid filter info", qId); + qError("QInfo:0x%"PRIx64" invalid filter info", qId); return TSDB_CODE_QRY_INVALID_MSG; } pSingleColFilter->fp = getFilterOperator(lower, upper); if (pSingleColFilter->fp == NULL) { - qError("QInfo:%"PRIu64" invalid filter info", qId); + qError("QInfo:0x%"PRIx64" invalid filter info", qId); return TSDB_CODE_QRY_INVALID_MSG; } - pSingleColFilter->bytes = pQuery->colList[i].bytes; + pSingleColFilter->bytes = pCols[i].bytes; } j++; @@ -6390,11 +6669,39 @@ static int32_t createFilterInfo(SQuery *pQuery, uint64_t qId) { return TSDB_CODE_SUCCESS; } -static void doUpdateExprColumnIndex(SQuery *pQuery) { - assert(pQuery->pExpr1 != NULL && pQuery != NULL); +void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) { + for (int32_t i = 0; i < numOfFilterCols; ++i) { + if (pFilterInfo[i].numOfFilters > 0) { + 
tfree(pFilterInfo[i].pFilters); + } + } + + tfree(pFilterInfo); + return NULL; +} + +static int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId) { + for (int32_t i = 0; i < pQueryAttr->numOfCols; ++i) { + if (pQueryAttr->tableCols[i].flist.numOfFilters > 0) { + pQueryAttr->numOfFilterCols++; + } + } + + if (pQueryAttr->numOfFilterCols == 0) { + return TSDB_CODE_SUCCESS; + } + + doCreateFilterInfo(pQueryAttr->tableCols, pQueryAttr->numOfCols, pQueryAttr->numOfFilterCols, + &pQueryAttr->pFilterInfo, qId); + + return TSDB_CODE_SUCCESS; +} + +static void doUpdateExprColumnIndex(SQueryAttr *pQueryAttr) { + assert(pQueryAttr->pExpr1 != NULL && pQueryAttr != NULL); - for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - SSqlFuncMsg *pSqlExprMsg = &pQuery->pExpr1[k].base; + for (int32_t k = 0; k < pQueryAttr->numOfOutput; ++k) { + SSqlExpr *pSqlExprMsg = &pQueryAttr->pExpr1[k].base; if (pSqlExprMsg->functionId == TSDB_FUNC_ARITHM) { continue; } @@ -6403,34 +6710,34 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) { SColIndex *pColIndex = &pSqlExprMsg->colInfo; if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) { int32_t f = 0; - for (f = 0; f < pQuery->numOfCols; ++f) { - if (pColIndex->colId == pQuery->colList[f].colId) { + for (f = 0; f < pQueryAttr->numOfCols; ++f) { + if (pColIndex->colId == pQueryAttr->tableCols[f].colId) { pColIndex->colIndex = f; break; } } - assert(f < pQuery->numOfCols); + assert(f < pQueryAttr->numOfCols); } else if (pColIndex->colId <= TSDB_UD_COLUMN_INDEX) { // do nothing for user-defined constant value result columns } else if (pColIndex->colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { pColIndex->colIndex = 0;// only one source column, so it must be 0; - assert(pQuery->numOfOutput == 1); + assert(pQueryAttr->numOfOutput == 1); } else { int32_t f = 0; - for (f = 0; f < pQuery->numOfTags; ++f) { - if (pColIndex->colId == pQuery->tagColList[f].colId) { + for (f = 0; f < pQueryAttr->numOfTags; ++f) { + if (pColIndex->colId == pQueryAttr->tagColList[f].colId) { pColIndex->colIndex = f; break; } } - assert(f < pQuery->numOfTags || pColIndex->colId == TSDB_TBNAME_COLUMN_INDEX || pColIndex->colId == TSDB_BLOCK_DIST_COLUMN_INDEX); + assert(f < pQueryAttr->numOfTags || pColIndex->colId == TSDB_TBNAME_COLUMN_INDEX || pColIndex->colId == TSDB_BLOCK_DIST_COLUMN_INDEX); } } } -void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo) { +void setResultBufSize(SQueryAttr* pQueryAttr, SRspResultInfo* pResultInfo) { const int32_t DEFAULT_RESULT_MSG_SIZE = 1024 * (1024 + 512); // the minimum number of rows for projection query @@ -6439,8 +6746,8 @@ void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo) { const float THRESHOLD_RATIO = 0.85f; - if (isProjQuery(pQuery)) { - int32_t numOfRes = DEFAULT_RESULT_MSG_SIZE / pQuery->resultRowSize; + if (isProjQuery(pQueryAttr)) { + int32_t numOfRes = DEFAULT_RESULT_MSG_SIZE / pQueryAttr->resultRowSize; if (numOfRes < MIN_ROWS_FOR_PRJ_QUERY) { numOfRes = MIN_ROWS_FOR_PRJ_QUERY; } @@ -6459,7 +6766,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) { } SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs, - SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, + SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -6473,73 +6780,87 @@ SQInfo* 
createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr // to make sure third party won't overwrite this structure pQInfo->signature = pQInfo; - SQuery* pQuery = &pQInfo->query; - pQInfo->runtimeEnv.pQuery = pQuery; - - pQuery->tableGroupInfo = *pTableGroupInfo; - pQuery->numOfCols = numOfCols; - pQuery->numOfOutput = numOfOutput; - pQuery->limit.limit = pQueryMsg->limit; - pQuery->limit.offset = pQueryMsg->offset; - pQuery->order.order = pQueryMsg->order; - pQuery->order.orderColId = pQueryMsg->orderColId; - pQuery->pExpr1 = pExprs; - pQuery->pExpr2 = pSecExprs; - pQuery->numOfExpr2 = pQueryMsg->secondStageOutput; - pQuery->pGroupbyExpr = pGroupbyExpr; - memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval)); - pQuery->fillType = pQueryMsg->fillType; - pQuery->numOfTags = pQueryMsg->numOfTags; - pQuery->tagColList = pTagCols; - pQuery->prjInfo.vgroupLimit = pQueryMsg->vgroupLimit; - pQuery->prjInfo.ts = (pQueryMsg->order == TSDB_ORDER_ASC)? INT64_MIN:INT64_MAX; - pQuery->sw = pQueryMsg->sw; - pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); - if (pQuery->colList == NULL) { + SQueryAttr* pQueryAttr = &pQInfo->query; + pQInfo->runtimeEnv.pQueryAttr = pQueryAttr; + + pQueryAttr->tableGroupInfo = *pTableGroupInfo; + pQueryAttr->numOfCols = numOfCols; + pQueryAttr->numOfOutput = numOfOutput; + pQueryAttr->limit.limit = pQueryMsg->limit; + pQueryAttr->limit.offset = pQueryMsg->offset; + pQueryAttr->order.order = pQueryMsg->order; + pQueryAttr->order.orderColId = pQueryMsg->orderColId; + pQueryAttr->pExpr1 = pExprs; + pQueryAttr->pExpr2 = pSecExprs; + pQueryAttr->numOfExpr2 = pQueryMsg->secondStageOutput; + pQueryAttr->pGroupbyExpr = pGroupbyExpr; + memcpy(&pQueryAttr->interval, &pQueryMsg->interval, sizeof(pQueryAttr->interval)); + pQueryAttr->fillType = pQueryMsg->fillType; + pQueryAttr->numOfTags = pQueryMsg->numOfTags; + pQueryAttr->tagColList = pTagCols; + pQueryAttr->prjInfo.vgroupLimit = pQueryMsg->vgroupLimit; + pQueryAttr->prjInfo.ts = (pQueryMsg->order == TSDB_ORDER_ASC)? 
INT64_MIN:INT64_MAX; + pQueryAttr->sw = pQueryMsg->sw; + + pQueryAttr->stableQuery = pQueryMsg->stableQuery; + pQueryAttr->topBotQuery = pQueryMsg->topBotQuery; + pQueryAttr->groupbyColumn = pQueryMsg->groupbyColumn; + pQueryAttr->hasTagResults = pQueryMsg->hasTagResults; + pQueryAttr->timeWindowInterpo = pQueryMsg->timeWindowInterpo; + pQueryAttr->queryBlockDist = pQueryMsg->queryBlockDist; + pQueryAttr->stabledev = pQueryMsg->stabledev; + pQueryAttr->tsCompQuery = pQueryMsg->tsCompQuery; + pQueryAttr->simpleAgg = pQueryMsg->simpleAgg; + pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery; + pQueryAttr->needReverseScan = pQueryMsg->needReverseScan; + pQueryAttr->vgId = vgId; + + pQueryAttr->tableCols = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); + if (pQueryAttr->tableCols == NULL) { goto _cleanup; } - pQuery->srcRowSize = 0; - pQuery->maxSrcColumnSize = 0; + pQueryAttr->srcRowSize = 0; + pQueryAttr->maxTableColumnWidth = 0; for (int16_t i = 0; i < numOfCols; ++i) { - pQuery->colList[i] = pQueryMsg->colList[i]; - pQuery->colList[i].filters = tFilterInfoDup(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters); + pQueryAttr->tableCols[i] = pQueryMsg->tableCols[i]; + pQueryAttr->tableCols[i].flist.filterInfo = tFilterInfoDup(pQueryMsg->tableCols[i].flist.filterInfo, pQueryAttr->tableCols[i].flist.numOfFilters); - pQuery->srcRowSize += pQuery->colList[i].bytes; - if (pQuery->maxSrcColumnSize < pQuery->colList[i].bytes) { - pQuery->maxSrcColumnSize = pQuery->colList[i].bytes; + pQueryAttr->srcRowSize += pQueryAttr->tableCols[i].bytes; + if (pQueryAttr->maxTableColumnWidth < pQueryAttr->tableCols[i].bytes) { + pQueryAttr->maxTableColumnWidth = pQueryAttr->tableCols[i].bytes; } } // calculate the result row size for (int16_t col = 0; col < numOfOutput; ++col) { - assert(pExprs[col].bytes > 0); - pQuery->resultRowSize += pExprs[col].bytes; + assert(pExprs[col].base.resBytes > 0); + pQueryAttr->resultRowSize += pExprs[col].base.resBytes; // keep the tag length if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) { - pQuery->tagLen += pExprs[col].bytes; + pQueryAttr->tagLen += pExprs[col].base.resBytes; } - if (pExprs[col].pFilter) { - ++pQuery->havingNum; + if (pExprs[col].base.flist.filterInfo) { + ++pQueryAttr->havingNum; } } - doUpdateExprColumnIndex(pQuery); - int32_t ret = createFilterInfo(pQuery, pQInfo->qId); + doUpdateExprColumnIndex(pQueryAttr); + int32_t ret = createFilterInfo(pQueryAttr, pQInfo->qId); if (ret != TSDB_CODE_SUCCESS) { goto _cleanup; } - if (pQuery->fillType != TSDB_FILL_NONE) { - pQuery->fillVal = malloc(sizeof(int64_t) * pQuery->numOfOutput); - if (pQuery->fillVal == NULL) { + if (pQueryAttr->fillType != TSDB_FILL_NONE) { + pQueryAttr->fillVal = malloc(sizeof(int64_t) * pQueryAttr->numOfOutput); + if (pQueryAttr->fillVal == NULL) { goto _cleanup; } // the first column is the timestamp - memcpy(pQuery->fillVal, (char *)pQueryMsg->fillVal, pQuery->numOfOutput * sizeof(int64_t)); + memcpy(pQueryAttr->fillVal, (char *)pQueryMsg->fillVal, pQueryAttr->numOfOutput * sizeof(int64_t)); } size_t numOfGroups = 0; @@ -6563,17 +6884,15 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr pthread_mutex_init(&pQInfo->lock, NULL); tsem_init(&pQInfo->ready, 0, 0); - pQuery->window = pQueryMsg->window; - changeExecuteScanOrder(pQInfo, pQueryMsg, stableQuery); + pQueryAttr->window = pQueryMsg->window; + changeExecuteScanOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery); SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - bool 
groupByCol = isGroupbyColumn(pQuery->pGroupbyExpr); - - STimeWindow window = pQuery->window; + STimeWindow window = pQueryAttr->window; int32_t index = 0; for(int32_t i = 0; i < numOfGroups; ++i) { - SArray* pa = taosArrayGetP(pQuery->tableGroupInfo.pGroupList, i); + SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); size_t s = taosArrayGetSize(pa); SArray* p1 = taosArrayInit(s, POINTER_BYTES); @@ -6588,7 +6907,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr window.skey = info->lastKey; void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); - STableQueryInfo* item = createTableQueryInfo(pQuery, info->pTable, groupByCol, window, buf); + STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf); if (item == NULL) { goto _cleanup; } @@ -6602,12 +6921,12 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr } } - colIdCheck(pQuery, pQInfo->qId); + colIdCheck(pQueryAttr, pQInfo->qId); // todo refactor pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX); - qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo); + qDebug("qmsg:%p QInfo:0x%" PRIx64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo); return pQInfo; _cleanup_qinfo: @@ -6626,8 +6945,8 @@ _cleanup_qinfo: pExprInfo->pExpr = NULL; } - if (pExprInfo->pFilter) { - freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum); + if (pExprInfo->base.flist.filterInfo) { + freeColumnFilterInfo(pExprInfo->base.flist.filterInfo, pExprInfo->base.flist.numOfFilters); } } @@ -6652,38 +6971,39 @@ bool isValidQInfo(void *param) { return (sig == (uint64_t)pQInfo); } -int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable) { +int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start, + int32_t prevResultLen, void* merger) { int32_t code = TSDB_CODE_SUCCESS; SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; pRuntimeEnv->qinfo = pQInfo; - SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; STSBuf *pTsBuf = NULL; - if (pQueryMsg->tsLen > 0) { // open new file to save the result - char *tsBlock = (char *) pQueryMsg + pQueryMsg->tsOffset; - pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); + if (pTsBufInfo->tsLen > 0) { // open new file to save the result + char *tsBlock = start + pTsBufInfo->tsOffset; + pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder, + pQueryAttr->vgId); tsBufResetPos(pTsBuf); bool ret = tsBufNextPos(pTsBuf); - UNUSED(ret); } SArray* prevResult = NULL; - if (pQueryMsg->prevResultLen > 0) { - prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen); - - pRuntimeEnv->prevResult = prevResult; + if (prevResultLen > 0) { + prevResult = interResFromBinary(param->prevResult, prevResultLen); } - pQuery->precision = tsdbGetCfg(tsdb)->precision; + if (tsdb != NULL) { + pQueryAttr->precision = tsdbGetCfg(tsdb)->precision; + } - if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.skey > pQuery->window.ekey)) || - (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.ekey > pQuery->window.skey))) { - qDebug("QInfo:%"PRIu64" no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo->qId, 
pQuery->window.skey, - pQuery->window.ekey, pQuery->order.order); + if ((QUERY_IS_ASC_QUERY(pQueryAttr) && (pQueryAttr->window.skey > pQueryAttr->window.ekey)) || + (!QUERY_IS_ASC_QUERY(pQueryAttr) && (pQueryAttr->window.ekey > pQueryAttr->window.skey))) { + qDebug("QInfo:0x%"PRIx64" no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo->qId, pQueryAttr->window.skey, + pQueryAttr->window.ekey, pQueryAttr->order.order); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); pRuntimeEnv->tableqinfoGroupInfo.numOfTables = 0; // todo free memory @@ -6691,13 +7011,13 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p } if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%"PRIu64" no table qualified for tag filter, abort query", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" no table qualified for tag filter, abort query", pQInfo->qId); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); return TSDB_CODE_SUCCESS; } // filter the qualified - if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { + if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, sourceOptr, param->tableScanOperator, param->pOperator, merger)) != TSDB_CODE_SUCCESS) { goto _error; } @@ -6709,6 +7029,7 @@ _error: return code; } +//TODO refactor void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { if (pFilter == NULL || numOfFilters == 0) { return; @@ -6758,8 +7079,12 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { tExprTreeDestroy(pExprInfo[i].pExpr, NULL); } - if (pExprInfo[i].pFilter) { - freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum); + if (pExprInfo[i].base.flist.filterInfo) { + freeColumnFilterInfo(pExprInfo[i].base.flist.filterInfo, pExprInfo[i].base.flist.numOfFilters); + } + + for(int32_t j = 0; j < pExprInfo[i].base.numOfParams; ++j) { + tVariantDestroy(&pExprInfo[i].base.param[j]); } } @@ -6772,58 +7097,26 @@ void freeQInfo(SQInfo *pQInfo) { return; } - qDebug("QInfo:%"PRIu64" start to free QInfo", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" start to free QInfo", pQInfo->qId); SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables); doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo); - teardownQueryRuntimeEnv(&pQInfo->runtimeEnv); - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - if (pQuery != NULL) { - if (pQuery->fillVal != NULL) { - tfree(pQuery->fillVal); - } - - for (int32_t i = 0; i < pQuery->numOfFilterCols; ++i) { - SSingleColumnFilterInfo *pColFilter = &pQuery->pFilterInfo[i]; - if (pColFilter->numOfFilters > 0) { - tfree(pColFilter->pFilters); - } - } - - pQuery->pExpr1 = destroyQueryFuncExpr(pQuery->pExpr1, pQuery->numOfOutput); - pQuery->pExpr2 = destroyQueryFuncExpr(pQuery->pExpr2, pQuery->numOfExpr2); - - tfree(pQuery->tagColList); - tfree(pQuery->pFilterInfo); - - if (pQuery->colList != NULL) { - for (int32_t i = 0; i < pQuery->numOfCols; i++) { - SColumnInfo *column = pQuery->colList + i; - freeColumnFilterInfo(column->filters, column->numOfFilters); - } - tfree(pQuery->colList); - } - - if (pQuery->pGroupbyExpr != NULL) { - taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo); - tfree(pQuery->pGroupbyExpr); - } - } + SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; + freeQueryAttr(pQueryAttr); + tsdbDestroyTableGroup(&pQueryAttr->tableGroupInfo); tfree(pQInfo->pBuf); tfree(pQInfo->sql); - tsdbDestroyTableGroup(&pQuery->tableGroupInfo); - 
taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows); pQInfo->signature = 0; - qDebug("QInfo:%"PRIu64" QInfo is freed", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" QInfo is freed", pQInfo->qId); tfree(pQInfo); } @@ -6831,10 +7124,10 @@ void freeQInfo(SQInfo *pQInfo) { int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { // the remained number of retrieved rows, not the interpolated result SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; // load data from file to msg buffer - if (isTsCompQuery(pQuery)) { + if (pQueryAttr->tsCompQuery) { SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0); FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor @@ -6843,7 +7136,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { off_t s = lseek(fileno(f), 0, SEEK_END); assert(s == pRuntimeEnv->outputBuf->info.rows); - qDebug("QInfo:%"PRIu64" ts comp data return, file:%p, size:%"PRId64, pQInfo->qId, f, (uint64_t)s); + qDebug("QInfo:0x%"PRIx64" ts comp data return, file:%p, size:%"PRId64, pQInfo->qId, f, (uint64_t)s); if (fseek(f, 0, SEEK_SET) >= 0) { size_t sz = fread(data, 1, s, f); if(sz < s) { // todo handle error @@ -6863,6 +7156,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { } fclose(f); + *(FILE **)pColInfoData->pData = NULL; } // all data returned, set query over @@ -6874,11 +7168,11 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { } pRuntimeEnv->resultInfo.total += pRuntimeEnv->outputBuf->info.rows; - qDebug("QInfo:%"PRIu64" current numOfRes rows:%d, total:%" PRId64, pQInfo->qId, + qDebug("QInfo:0x%"PRIx64" current numOfRes rows:%d, total:%" PRId64, pQInfo->qId, pRuntimeEnv->outputBuf->info.rows, pRuntimeEnv->resultInfo.total); - if (pQuery->limit.limit > 0 && pQuery->limit.limit == pRuntimeEnv->resultInfo.total) { - qDebug("QInfo:%"PRIu64" results limitation reached, limitation:%"PRId64, pQInfo->qId, pQuery->limit.limit); + if (pQueryAttr->limit.limit > 0 && pQueryAttr->limit.limit == pRuntimeEnv->resultInfo.total) { + qDebug("QInfo:0x%"PRIx64" results limitation reached, limitation:%"PRId64, pQInfo->qId, pQueryAttr->limit.limit); setQueryStatus(pRuntimeEnv, QUERY_OVER); } @@ -6966,3 +7260,34 @@ void releaseQueryBuf(size_t numOfTables) { // restore value is not enough buffer available atomic_add_fetch_64(&tsQueryBufferSizeBytes, t); } + +void freeQueryAttr(SQueryAttr* pQueryAttr) { + if (pQueryAttr != NULL) { + if (pQueryAttr->fillVal != NULL) { + tfree(pQueryAttr->fillVal); + } + + pQueryAttr->pFilterInfo = doDestroyFilterInfo(pQueryAttr->pFilterInfo, pQueryAttr->numOfFilterCols); + + pQueryAttr->pExpr1 = destroyQueryFuncExpr(pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pQueryAttr->pExpr2 = destroyQueryFuncExpr(pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); + pQueryAttr->pExpr3 = destroyQueryFuncExpr(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3); + + tfree(pQueryAttr->tagColList); + tfree(pQueryAttr->pFilterInfo); + + if (pQueryAttr->tableCols != NULL) { + for (int32_t i = 0; i < pQueryAttr->numOfCols; i++) { + SColumnInfo* column = pQueryAttr->tableCols + i; + freeColumnFilterInfo(column->flist.filterInfo, column->flist.numOfFilters); + } + tfree(pQueryAttr->tableCols); + } + + if (pQueryAttr->pGroupbyExpr != NULL) { + taosArrayDestroy(pQueryAttr->pGroupbyExpr->columnInfo); + tfree(pQueryAttr->pGroupbyExpr); + } + } +} + diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 
a73f38528266bfa70790414a2963eb0a8290d7b9..1fe2819ea2347f8e092a3955f6d4c3269e148294 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -20,6 +20,7 @@ #include "taosdef.h" #include "taosmsg.h" #include "tulog.h" +#include "qExecutor.h" #define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes) @@ -266,6 +267,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); if (retVal <= 0) { // failed to write to buffer, may be not enough space ret = TAOS_SYSTEM_ERROR(errno); + pMemBuffer->pHead = first; return ret; } @@ -351,47 +353,28 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t } } -static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { +static int32_t tsCompareFunc(TSKEY k1, TSKEY k2, int32_t order) { + if (k1 == k2) { + return 0; + } + + if (order == TSDB_ORDER_DESC) { + return (k1 < k2)? 1:-1; + } else { + return (k1 < k2)? -1:1; + } +} + +int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { switch (type) { - case TSDB_DATA_TYPE_INT: { - int32_t first = *(int32_t *) f1; - int32_t second = *(int32_t *) f2; - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; - }; - case TSDB_DATA_TYPE_DOUBLE: { - DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); - }; - case TSDB_DATA_TYPE_FLOAT: { - DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); - }; - case TSDB_DATA_TYPE_BIGINT: { - int64_t first = *(int64_t *)f1; - int64_t second = *(int64_t *)f2; - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; - }; - case TSDB_DATA_TYPE_SMALLINT: { - int16_t first = *(int16_t *)f1; - int16_t second = *(int16_t *)f2; - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; - }; + case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2)); + case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); + case TSDB_DATA_TYPE_FLOAT: DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); + case TSDB_DATA_TYPE_BIGINT: DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2)); + case TSDB_DATA_TYPE_SMALLINT:DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2)); case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: { - int8_t first = *(int8_t *)f1; - int8_t second = *(int8_t *)f2; - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; - }; + case TSDB_DATA_TYPE_TINYINT: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2)); + case TSDB_DATA_TYPE_BINARY: { int32_t len1 = varDataLen(f1); int32_t len2 = varDataLen(f2); @@ -414,6 +397,10 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i } return (ret < 0) ? 
-1 : 1; }; + case TSDB_DATA_TYPE_UTINYINT: DEFAULT_COMP(GET_UINT8_VAL(f1), GET_UINT8_VAL(f2)); + case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2)); + case TSDB_DATA_TYPE_UINT: DEFAULT_COMP(GET_UINT32_VAL(f1), GET_UINT32_VAL(f2)); + case TSDB_DATA_TYPE_UBIGINT: DEFAULT_COMP(GET_UINT64_VAL(f1), GET_UINT64_VAL(f2)); } return 0; @@ -451,6 +438,35 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, return 0; } +int32_t compare_aRv(SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order) { + for (int32_t i = 0; i < numOfCols; ++i) { + SColIndex* pColIndex = taosArrayGet(colIndex, i); + int32_t index = pColIndex->colIndex; + + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index); + assert(pColIndex->colId == pColInfo->info.colId); + + char* data = pColInfo->pData + rowIndex * pColInfo->info.bytes; + if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + int32_t ret = tsCompareFunc(GET_INT64_VAL(data), GET_INT64_VAL(buffer[i]), order); + if (ret == 0) { + continue; // The timestamps are identical + } else { + return ret; + } + } else { + int32_t ret = columnValueAscendingComparator(data, buffer[i], pColInfo->info.type, pColInfo->info.bytes); + if (ret == 0) { + continue; + } else { + return ret; + } + } + } + + return 0; +} + int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, char *data1, int32_t numOfRows2, int32_t s2, char *data2) { assert(numOfRows1 == numOfRows2); diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 2de1029396fb779798ea7c20da939b2153ca3dd3..fa572029fc043fc13b9822f1e688696ca9a0a225 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -31,7 +31,7 @@ static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) { for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) { SFillColInfo* pCol = &pFillInfo->pFillCol[j]; - if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) { + if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || TSDB_COL_IS_UD_COL(pCol->flag)) { continue; } @@ -126,10 +126,10 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData } else { setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } - } else { /* fill the default value */ + } else { // fill the default value */ for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { + if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { continue; } @@ -210,7 +210,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR // assign rows to dst buffer for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { + if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { continue; } @@ -275,13 +275,16 @@ static int64_t appendFilledResult(SFillInfo* pFillInfo, void** output, int64_t r // there are no duplicated tags in the SFillTagColInfo list static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) { int32_t rowsize = 0; + int32_t numOfTags = 0; int32_t k = 0; for (int32_t i = 0; i < numOfCols; ++i) { SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; pFillInfo->pData[i] = NULL; - if (TSDB_COL_IS_TAG(pColInfo->flag)) { + if (TSDB_COL_IS_TAG(pColInfo->flag) || pColInfo->col.type == TSDB_DATA_TYPE_BINARY) { + numOfTags += 1; + bool exists = false; int32_t index = -1; 
for (int32_t j = 0; j < k; ++j) { @@ -310,6 +313,8 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t rowsize += pColInfo->col.bytes; } + pFillInfo->numOfTags = numOfTags; + assert(k <= pFillInfo->numOfTags); return rowsize; } @@ -347,12 +352,13 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3 pFillInfo->interval.slidingUnit = slidingUnit; pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); - if (numOfTags > 0) { - pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); - for (int32_t i = 0; i < numOfTags; ++i) { + +// if (numOfTags > 0) { + pFillInfo->pTags = calloc(numOfCols, sizeof(SFillTagColInfo)); + for (int32_t i = 0; i < numOfCols; ++i) { pFillInfo->pTags[i].col.colId = -2; // TODO } - } +// } pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc); assert(pFillInfo->rowSize > 0); @@ -367,6 +373,7 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3 void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) { pFillInfo->start = startTimestamp; pFillInfo->currentKey = startTimestamp; + pFillInfo->end = startTimestamp; pFillInfo->index = -1; pFillInfo->numOfRows = 0; pFillInfo->numOfCurrent = 0; @@ -425,6 +432,8 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i); // pFillInfo->pData[i] = pColData->pData; if (pInput->info.rows > pFillInfo->alloc) { @@ -436,6 +445,12 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) } memcpy(pFillInfo->pData[i], pColData->pData, pColData->info.bytes * pInput->info.rows); + + if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer + SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; + assert (pTag->col.colId == pCol->col.colId); + memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy?? + } } } @@ -456,7 +471,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* memcpy(pFillInfo->pData[i], data, (size_t)(pCol->col.bytes * pInput->num)); - if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer + if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; assert (pTag->col.colId == pCol->col.colId); memcpy(pTag->tagVal, data, pCol->col.bytes); // TODO not memcpy?? 
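Editorial aside (not part of the patch): the qFill.c hunks above change how tag and binary columns are treated during gap filling — their values are cached once in SFillTagColInfo and then copied verbatim into every generated row rather than interpolated. The standalone sketch below only illustrates that copy-through pattern under simplified assumptions; FillTag and fillTagCell are hypothetical stand-ins, not the actual TDengine structures or APIs.

/* Minimal sketch of the "cache the tag once, reuse it for every filled row"
 * pattern used by the fill code above. Hypothetical types; not the TDengine API. */
#include <stdio.h>
#include <string.h>

typedef struct FillTag {
  int  colId;
  char val[32];   /* tag value captured once from the input data block */
} FillTag;

/* Tag cells in generated rows are copied as-is; only normal columns are
 * interpolated or NULL-filled elsewhere. */
static void fillTagCell(char *dstTagCell, const FillTag *tag) {
  memcpy(dstTagCell, tag->val, sizeof(tag->val));
}

int main(void) {
  FillTag tag = { .colId = 1 };
  strcpy(tag.val, "device-007");        /* captured from the source block */

  char filledRowTagCell[32];
  fillTagCell(filledRowTagCell, &tag);  /* every filled row reuses the cached tag */
  printf("tag carried into filled row: %s\n", filledRowTagCell);
  return 0;
}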
@@ -465,7 +480,17 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* } bool taosFillHasMoreResults(SFillInfo* pFillInfo) { - return taosNumOfRemainRows(pFillInfo) > 0; + int32_t remain = taosNumOfRemainRows(pFillInfo); + if (remain > 0) { + return true; + } + + if (pFillInfo->numOfTotal > 0 && (((pFillInfo->end > pFillInfo->start) && FILL_IS_ASC_FILL(pFillInfo)) || + (pFillInfo->end < pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo)))) { + return getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, 4096) > 0; + } + + return false; } int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c new file mode 100644 index 0000000000000000000000000000000000000000..0554a887ec68ca3353b5e45c1067ea734ea98cfd --- /dev/null +++ b/src/query/src/qPlan.c @@ -0,0 +1,195 @@ +#include "os.h" +#include "tsclient.h" +#include "qUtil.h" +#include "texpr.h" + +#define QNODE_PROJECT 1 +#define QNODE_FILTER 2 +#define QNODE_RELATION 3 +#define QNODE_AGGREGATE 4 +#define QNODE_GROUPBY 5 +#define QNODE_LIMIT 6 +#define QNODE_JOIN 7 +#define QNODE_DIST 8 +#define QNODE_SORT 9 +#define QNODE_UNIONALL 10 +#define QNODE_TIMEWINDOW 11 + +typedef struct SQueryNode { + int32_t type; // the type of logic node + char *name; // the name of logic node + + SSchema *pSchema; // the schema of the input SSDatablock + int32_t numOfCols; // number of input columns + SExprInfo *pExpr; // the query functions or sql aggregations + int32_t numOfOutput; // number of result columns, which is also the number of pExprs + + // previous operator to generated result for current node to process + // in case of join, multiple prev nodes exist. + struct SQueryNode* prevNode; + struct SQueryNode* nextNode; +} SQueryNode; + +// TODO create the query plan +SQueryNode* qCreateQueryPlan(SQueryInfo* pQueryInfo) { + return NULL; +} + +char* queryPlanToString() { + return NULL; +} + +SQueryNode* queryPlanFromString() { + return NULL; +} + +SArray* createTableScanPlan(SQueryAttr* pQueryAttr) { + SArray* plan = taosArrayInit(4, sizeof(int32_t)); + + int32_t op = 0; + if (onlyQueryTags(pQueryAttr)) { +// op = OP_TagScan; + } else { + if (pQueryAttr->queryBlockDist) { + op = OP_TableBlockInfoScan; + } else if (pQueryAttr->tsCompQuery || pQueryAttr->pointInterpQuery) { + op = OP_TableSeqScan; + } else if (pQueryAttr->needReverseScan) { + op = OP_DataBlocksOptScan; + } else { + op = OP_TableScan; + } + + taosArrayPush(plan, &op); + } + + return plan; +} + +SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { + SArray* plan = taosArrayInit(4, sizeof(int32_t)); + int32_t op = 0; + + if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query + op = OP_TagScan; + taosArrayPush(plan, &op); + if (pQueryAttr->distinctTag) { + op = OP_Distinct; + taosArrayPush(plan, &op); + } + } else if (pQueryAttr->interval.interval > 0) { + if (pQueryAttr->stableQuery) { + op = OP_MultiTableTimeInterval; + taosArrayPush(plan, &op); + } else { + op = OP_TimeWindow; + taosArrayPush(plan, &op); + + if (pQueryAttr->pExpr2 != NULL) { + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + op = OP_Fill; + taosArrayPush(plan, &op); + } + } + + } else if (pQueryAttr->groupbyColumn) { + op = OP_Groupby; + taosArrayPush(plan, &op); + + if (!pQueryAttr->stableQuery && pQueryAttr->havingNum > 0) { + op = OP_Filter; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->pExpr2 != 
NULL) { + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + } else if (pQueryAttr->sw.gap > 0) { + op = OP_SessionWindow; + taosArrayPush(plan, &op); + + if (pQueryAttr->pExpr2 != NULL) { + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + } else if (pQueryAttr->simpleAgg) { + if (pQueryAttr->stableQuery && !pQueryAttr->tsCompQuery) { + op = OP_MultiTableAggregate; + } else { + op = OP_Aggregate; + } + + taosArrayPush(plan, &op); + + if (!pQueryAttr->stableQuery && pQueryAttr->havingNum > 0) { + op = OP_Filter; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) { + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + } else { // diff/add/multiply/subtract/division + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { + op = OP_Limit; + taosArrayPush(plan, &op); + } + + return plan; +} + +SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { + SArray* plan = taosArrayInit(4, sizeof(int32_t)); + + if (!pQueryAttr->stableQuery) { + return plan; + } + + int32_t op = OP_MultiwayMergeSort; + taosArrayPush(plan, &op); + + if (pQueryAttr->distinctTag) { + op = OP_Distinct; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->simpleAgg || (pQueryAttr->interval.interval > 0 || pQueryAttr->sw.gap > 0)) { + op = OP_GlobalAggregate; + taosArrayPush(plan, &op); + + if (pQueryAttr->havingNum > 0) { + op = OP_Filter; + taosArrayPush(plan, &op); + } + + if (pQueryAttr->pExpr2 != NULL) { + op = OP_Arithmetic; + taosArrayPush(plan, &op); + } + } + + // fill operator + if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + op = OP_Fill; + taosArrayPush(plan, &op); + } + + // limit/offset operator + if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0 || + pQueryAttr->slimit.limit > 0 || pQueryAttr->slimit.offset > 0) { + op = OP_SLimit; + taosArrayPush(plan, &op); + } + + return plan; +} diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index f83caf2d8f006a0b3932d09c217523f8d35cb826..05ecf2e9b1163e1f858bb6a1b08e0df7eaabcab0 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -43,7 +43,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa pResBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t)); - qDebug("QInfo:%"PRIu64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize, + qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize, pResBuf->inMemPages, pResBuf->path); return TSDB_CODE_SUCCESS; @@ -410,13 +410,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { } if (pResultBuf->file != NULL) { - qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb", + qDebug("QInfo:0x%"PRIx64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb", pResultBuf->qId, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0, pResultBuf->fileSize/1024.0); fclose(pResultBuf->file); } else { - qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId, + qDebug("QInfo:0x%"PRIx64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId, pResultBuf->totalBufSize/1024.0); } diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 
b75032967abde595bccea6d6577465b0870791c2..fb8f164ed3f2920767c61dd08969a7435002f9d4 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -144,12 +144,15 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond pSqlExpr->type = SQL_NODE_VALUE; + pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP; } else if (optrType == TK_VARIABLE) { int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64); if (ret != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; } + pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP; + pSqlExpr->flags |= 1 << EXPR_FLAG_TIMESTAMP_VAR; pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pSqlExpr->tokenId = TK_TIMESTAMP; pSqlExpr->type = SQL_NODE_VALUE; @@ -217,6 +220,15 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pExpr->tokenId = pLeft->tokenId; pExpr->type = SQL_NODE_VALUE; + pExpr->flags = pLeft->flags | pRight->flags; + + if ((pLeft->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR)) && (pRight->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR))) { + pExpr->flags |= 1 << EXPR_FLAG_TS_ERROR; + } else { + pExpr->flags &= ~(1 << EXPR_FLAG_TIMESTAMP_VAR); + pExpr->flags &= ~(1 << EXPR_FLAG_TS_ERROR); + } + switch (optrType) { case TK_PLUS: { @@ -245,7 +257,6 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { tSqlExprDestroy(pLeft); tSqlExprDestroy(pRight); - } else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) || (pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) || (pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) { @@ -319,7 +330,7 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { if ((left == NULL && right) || (left && right == NULL)) { return 1; } - + if (left->type != right->type) { return 1; } @@ -332,11 +343,11 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { return 1; } - if ((left->pLeft && right->pLeft == NULL) + if ((left->pLeft && right->pLeft == NULL) || (left->pLeft == NULL && right->pLeft) - || (left->pRight && right->pRight == NULL) + || (left->pRight && right->pRight == NULL) || (left->pRight == NULL && right->pRight) - || (left->pParam && right->pParam == NULL) + || (left->pParam && right->pParam == NULL) || (left->pParam == NULL && right->pParam)) { return 1; } @@ -349,19 +360,18 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { return 1; } - if (right->pParam && left->pParam) { size_t size = taosArrayGetSize(right->pParam); if (left->pParam && taosArrayGetSize(left->pParam) != size) { return 1; } - for (int32_t i = 0; i < size; i++) { + for (int32_t i = 0; i < size; i++) { tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i); tSqlExpr* pSubLeft = pLeftElem->pNode; - tSqlExprItem* pRightElem = taosArrayGet(left->pParam, i); + tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i); tSqlExpr* pSubRight = pRightElem->pNode; - + if (tSqlExprCompare(pSubLeft, pSubRight)) { return 1; } @@ -523,13 +533,13 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int return pList; } -SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias) { - if (pFromInfo == NULL) { - pFromInfo = calloc(1, sizeof(SFromInfo)); - pFromInfo->tableList = taosArrayInit(4, sizeof(STableNamePair)); +SRelationInfo *setTableNameList(SRelationInfo* 
pRelationInfo, SStrToken *pName, SStrToken* pAlias) { + if (pRelationInfo == NULL) { + pRelationInfo = calloc(1, sizeof(SRelationInfo)); + pRelationInfo->list = taosArrayInit(4, sizeof(STableNamePair)); } - pFromInfo->type = SQL_NODE_FROM_NAMELIST; + pRelationInfo->type = SQL_NODE_FROM_TABLELIST; STableNamePair p = {.name = *pName}; if (pAlias != NULL) { p.aliasName = *pAlias; @@ -537,34 +547,39 @@ SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* p TPARSER_SET_NONE_TOKEN(p.aliasName); } - taosArrayPush(pFromInfo->tableList, &p); - - return pFromInfo; + taosArrayPush(pRelationInfo->list, &p); + return pRelationInfo; } -SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode* pSqlNode) { - if (pFromInfo == NULL) { - pFromInfo = calloc(1, sizeof(SFromInfo)); +SRelationInfo* setSubquery(SRelationInfo* pRelationInfo, SArray* pList) { + if (pRelationInfo == NULL) { + pRelationInfo = calloc(1, sizeof(SRelationInfo)); + pRelationInfo->list = taosArrayInit(4, POINTER_BYTES); } - pFromInfo->type = SQL_NODE_FROM_SUBQUERY; - pFromInfo->pNode->pClause[pFromInfo->pNode->numOfClause - 1] = pSqlNode; + pRelationInfo->type = SQL_NODE_FROM_SUBQUERY; + taosArrayPush(pRelationInfo->list, &pList); - return pFromInfo; + return pRelationInfo; } -void* destroyFromInfo(SFromInfo* pFromInfo) { - if (pFromInfo == NULL) { +void* destroyRelationInfo(SRelationInfo* pRelationInfo) { + if (pRelationInfo == NULL) { return NULL; } - if (pFromInfo->type == SQL_NODE_FROM_NAMELIST) { - taosArrayDestroy(pFromInfo->tableList); + if (pRelationInfo->type == SQL_NODE_FROM_TABLELIST) { + taosArrayDestroy(pRelationInfo->list); } else { - destroyAllSelectClause(pFromInfo->pNode); + size_t size = taosArrayGetSize(pRelationInfo->list); + for(int32_t i = 0; i < size; ++i) { + SArray* pa = taosArrayGetP(pRelationInfo->list, 0); + destroyAllSqlNode(pa); + } + taosArrayDestroy(pRelationInfo->list); } - tfree(pFromInfo); + tfree(pRelationInfo); return NULL; } @@ -708,19 +723,19 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { /* * extract the select info out of sql string */ -SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, +SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *psLimit, tSqlExpr *pHaving) { - assert(pSelectList != NULL); + assert(pSelNodeList != NULL); - SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode)); + SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode)); // all later sql string are belonged to the stream sql pSqlNode->sqlstr = *pSelectToken; pSqlNode->sqlstr.n = (uint32_t)strlen(pSqlNode->sqlstr.z); - pSqlNode->pSelectList = pSelectList; + pSqlNode->pSelNodeList = pSelNodeList; pSqlNode->from = pFrom; pSqlNode->pGroupby = pGroupby; pSqlNode->pSortOrder = pSortOrder; @@ -778,49 +793,47 @@ void freeCreateTableInfo(void* p) { tfree(pInfo->tagdata.data); } -void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) { - if (pQuerySql == NULL) { +void destroySqlNode(SSqlNode *pSqlNode) { + if (pSqlNode == NULL) { return; } - tSqlExprListDestroy(pQuerySql->pSelectList); - - pQuerySql->pSelectList = NULL; - - tSqlExprDestroy(pQuerySql->pWhere); - pQuerySql->pWhere = NULL; + tSqlExprListDestroy(pSqlNode->pSelNodeList); + pSqlNode->pSelNodeList = NULL; - tSqlExprDestroy(pQuerySql->pHaving); - 
pQuerySql->pHaving = NULL; + tSqlExprDestroy(pSqlNode->pWhere); + pSqlNode->pWhere = NULL; - taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); - pQuerySql->pSortOrder = NULL; + taosArrayDestroyEx(pSqlNode->pSortOrder, freeVariant); + pSqlNode->pSortOrder = NULL; - taosArrayDestroyEx(pQuerySql->pGroupby, freeVariant); - pQuerySql->pGroupby = NULL; + taosArrayDestroyEx(pSqlNode->pGroupby, freeVariant); + pSqlNode->pGroupby = NULL; - pQuerySql->from = destroyFromInfo(pQuerySql->from); + pSqlNode->from = destroyRelationInfo(pSqlNode->from); - taosArrayDestroyEx(pQuerySql->fillType, freeVariant); - pQuerySql->fillType = NULL; + taosArrayDestroyEx(pSqlNode->fillType, freeVariant); + pSqlNode->fillType = NULL; - free(pQuerySql); + tSqlExprDestroy(pSqlNode->pHaving); + free(pSqlNode); } -void destroyAllSelectClause(SSubclauseInfo *pClause) { - if (pClause == NULL || pClause->numOfClause == 0) { +void destroyAllSqlNode(SArray *pList) { + if (pList == NULL) { return; } - for(int32_t i = 0; i < pClause->numOfClause; ++i) { - SQuerySqlNode *pQuerySql = pClause->pClause[i]; - destroyQuerySqlNode(pQuerySql); + size_t size = taosArrayGetSize(pList); + for(int32_t i = 0; i < size; ++i) { + SSqlNode *pNode = taosArrayGetP(pList, i); + destroySqlNode(pNode); } - - tfree(pClause->pClause); + + taosArrayDestroy(pList); } -SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type) { +SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type) { SCreateTableSql *pCreate = calloc(1, sizeof(SCreateTableSql)); switch (type) { @@ -888,7 +901,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray } void* destroyCreateTableSql(SCreateTableSql* pCreate) { - destroyQuerySqlNode(pCreate->pSelect); + destroySqlNode(pCreate->pSelect); taosArrayDestroy(pCreate->colInfo.pColumns); taosArrayDestroy(pCreate->colInfo.pTagColumns); @@ -903,7 +916,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) { if (pInfo == NULL) return; if (pInfo->type == TSDB_SQL_SELECT) { - destroyAllSelectClause(&pInfo->subclauseInfo); + destroyAllSqlNode(pInfo->list); } else if (pInfo->type == TSDB_SQL_CREATE_TABLE) { pInfo->pCreateTableInfo = destroyCreateTableSql(pInfo->pCreateTableInfo); } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { @@ -924,31 +937,20 @@ void SqlInfoDestroy(SSqlInfo *pInfo) { } } -SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) { - if (pSubclause == NULL) { - pSubclause = calloc(1, sizeof(SSubclauseInfo)); - } - - int32_t newSize = pSubclause->numOfClause + 1; - char* tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES); - if (tmp == NULL) { - return pSubclause; +SArray* setSubclause(SArray* pList, void *pSqlNode) { + if (pList == NULL) { + pList = taosArrayInit(1, POINTER_BYTES); } - pSubclause->pClause = (SQuerySqlNode**) tmp; - - pSubclause->pClause[newSize - 1] = pSqlExprInfo; - pSubclause->numOfClause++; - - return pSubclause; + taosArrayPush(pList, &pSqlNode); + return pList; } SSqlInfo* setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type) { pInfo->type = type; if (type == TSDB_SQL_SELECT) { - pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo; - free(pSqlExprInfo); + pInfo->list = (SArray*) pSqlExprInfo; } else { pInfo->pCreateTableInfo = pSqlExprInfo; } @@ -960,16 +962,9 @@ SSqlInfo* setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, return pInfo; } -SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void 
*pSubclause) { - char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES); - if (tmp == NULL) { // out of memory - return pQueryInfo; - } - - pQueryInfo->pClause = (SQuerySqlNode**) tmp; - pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause; - - return pQueryInfo; +SArray* appendSelectClause(SArray *pList, void *pSubclause) { + taosArrayPush(pList, &pSubclause); + return pList; } void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists) { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index aa793add840351311bf23d37d39ba06945e583e4..7ff2d169623e99e676b21402b621161bcd49a2eb 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -30,11 +30,11 @@ typedef struct SCompSupporter { int32_t order; } SCompSupporter; -int32_t getOutputInterResultBufSize(SQuery* pQuery) { +int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) { int32_t size = 0; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - size += pQuery->pExpr1[i].interBytes; + for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + size += pQueryAttr->pExpr1[i].base.interBytes; } assert(size >= 0); @@ -136,11 +136,11 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16 tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResultRow->pageId); int16_t offset = 0; - for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) { + for (int32_t i = 0; i < pRuntimeEnv->pQueryAttr->numOfOutput; ++i) { SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i]; - int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; - char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size); + int16_t size = pRuntimeEnv->pQueryAttr->pExpr1[i].base.resType; + char * s = getPosInResultPage(pRuntimeEnv->pQueryAttr, page, pResultRow->offset, offset); memset(s, 0, size); offset += size; @@ -164,8 +164,8 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t } size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) { - SQuery* pQuery = pRuntimeEnv->pQuery; - return (pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pQuery->interBufSize + sizeof(SResultRow); + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + return (pQueryAttr->numOfOutput * sizeof(SResultRowCellInfo)) + pQueryAttr->interBufSize + sizeof(SResultRow); } SResultRowPool* initResultRowPool(size_t size) { @@ -382,10 +382,10 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { } static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow, int32_t* rowCellInfoOffset) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pExpr1[j].base.functionId; + for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) { + int32_t functionId = pQueryAttr->pExpr1[j].base.functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -448,7 +448,7 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void * static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, int32_t* rowCellInfoOffset) { - bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery); + bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr); int32_t code = TSDB_CODE_SUCCESS; @@ -484,7 +484,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes goto 
_end; } - SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order}; + SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQueryAttr->order.order}; int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); if (ret != TSDB_CODE_SUCCESS) { @@ -537,7 +537,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes int64_t endt = taosGetTimestampMs(); - qDebug("QInfo:%"PRIu64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv), + qDebug("QInfo:%"PRIx64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, endt - startt); _end: diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 84cd7fe93b07c2d20b8d6f2aabd0097d983b402e..f00d5ceb41aad443911036925ef92a6fc3fd4030 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -14,7 +14,6 @@ */ #include "os.h" -#include "qFill.h" #include "taosmsg.h" #include "tcache.h" #include "tglobal.h" @@ -23,13 +22,11 @@ #include "hash.h" #include "texpr.h" #include "qExecutor.h" -#include "qResultbuf.h" #include "qUtil.h" #include "query.h" #include "queryLog.h" #include "tlosertree.h" #include "ttype.h" -#include "tcompare.h" typedef struct SQueryMgmt { pthread_mutex_t lock; @@ -58,10 +55,13 @@ void freeParam(SQueryParam *param) { tfree(param->tagCond); tfree(param->tbnameCond); tfree(param->pTableIdList); - tfree(param->pExprMsg); - tfree(param->pSecExprMsg); + taosArrayDestroy(param->pOperator); tfree(param->pExprs); tfree(param->pSecExprs); + + tfree(param->pExpr); + tfree(param->pSecExpr); + tfree(param->pGroupColIndex); tfree(param->pTagColumnInfo); tfree(param->pGroupbyExpr); @@ -91,12 +91,14 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols}; + if ((code = createQueryFunc(&info, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExpr, param.pTagColumnInfo, + pQueryMsg->queryType, pQueryMsg)) != TSDB_CODE_SUCCESS) { goto _over; } - if (param.pSecExprMsg != NULL) { - if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pExprs)) != TSDB_CODE_SUCCESS) { + if (param.pSecExpr != NULL) { + if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExpr, param.pExprs)) != TSDB_CODE_SUCCESS) { goto _over; } } @@ -158,7 +160,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, param.pTagColumnInfo, isSTableQuery, param.sql, qId); + assert(pQueryMsg->stableQuery == isSTableQuery); + (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, + param.pTagColumnInfo, vgId, param.sql, qId); param.sql = NULL; param.pExprs = NULL; @@ -171,7 +175,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, ¶m, isSTableQuery); + code = initQInfo(&pQueryMsg->tsBuf, tsdb, NULL, *pQInfo, ¶m, (char*)pQueryMsg, 
pQueryMsg->prevResultLen, NULL); _over: if (param.pGroupbyExpr != NULL) { @@ -184,8 +188,8 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi freeParam(¶m); for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { - SColumnInfo* column = pQueryMsg->colList + i; - freeColumnFilterInfo(column->filters, column->numOfFilters); + SColumnInfo* column = pQueryMsg->tableCols + i; + freeColumnFilterInfo(column->flist.filterInfo, column->flist.numOfFilters); } //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; @@ -205,23 +209,22 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { int64_t curOwner = 0; if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { - qError("QInfo:%"PRIu64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner); + qError("QInfo:0x%"PRIx64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner); pQInfo->code = TSDB_CODE_QRY_IN_EXEC; return false; } - *qId = pQInfo->qId; pQInfo->startExecTs = taosGetTimestampSec(); if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%"PRIu64" it is already killed, abort", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); return doBuildResCheck(pQInfo); } SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%"PRIu64" no table exists for query, abort", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" no table exists for query, abort", pQInfo->qId); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); return doBuildResCheck(pQInfo); } @@ -230,21 +233,22 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { int32_t ret = setjmp(pQInfo->runtimeEnv.env); if (ret != TSDB_CODE_SUCCESS) { pQInfo->code = ret; - qDebug("QInfo:%"PRIu64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code)); + qDebug("QInfo:0x%"PRIx64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code)); return doBuildResCheck(pQInfo); } - qDebug("QInfo:%"PRIu64" query task is launched", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" query task is launched", pQInfo->qId); - pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot); + bool newgroup = false; + pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup); if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%"PRIu64" query is killed", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" query is killed", pQInfo->qId); } else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) { - qDebug("QInfo:%"PRIu64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables, + qDebug("QInfo:0x%"PRIx64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables, pRuntimeEnv->resultInfo.total); } else { - qDebug("QInfo:%"PRIu64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows", + qDebug("QInfo:0x%"PRIx64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows", pQInfo->qId, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv)); } @@ -255,13 +259,13 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex SQInfo *pQInfo = (SQInfo *)qinfo; if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - qError("QInfo:%"PRIu64" invalid qhandle", pQInfo->qId); + qError("QInfo:0x%"PRIx64" invalid qhandle", pQInfo->qId); return TSDB_CODE_QRY_INVALID_QHANDLE; } *buildRes = false; if 
(IS_QUERY_KILLED(pQInfo)) { - qDebug("QInfo:%"PRIu64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code); + qDebug("QInfo:0x%"PRIx64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code); return pQInfo->code; } @@ -274,18 +278,18 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex code = pQInfo->code; } else { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; pthread_mutex_lock(&pQInfo->lock); assert(pQInfo->rspContext == NULL); if (pQInfo->dataReady == QUERY_RESULT_READY) { *buildRes = true; - qDebug("QInfo:%"PRIu64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQuery->resultRowSize, + qDebug("QInfo:0x%"PRIx64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQueryAttr->resultRowSize, GET_NUM_OF_RESULTS(pRuntimeEnv), tstrerror(pQInfo->code)); } else { *buildRes = false; - qDebug("QInfo:%"PRIu64" retrieve req set query return result after paused", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" retrieve req set query return result after paused", pQInfo->qId); pQInfo->rspContext = pRspContext; assert(pQInfo->rspContext != NULL); } @@ -304,11 +308,11 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co return TSDB_CODE_QRY_INVALID_QHANDLE; } - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; int32_t s = GET_NUM_OF_RESULTS(pRuntimeEnv); - size_t size = pQuery->resultRowSize * s; + size_t size = pQueryAttr->resultRowSize * s; size += sizeof(int32_t); size += sizeof(STableIdInfo) * taosHashGetSize(pRuntimeEnv->pTableRetrieveTsMap); @@ -330,7 +334,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co (*pRsp)->useconds = htobe64(pQInfo->summary.elapsedTime); } - (*pRsp)->precision = htons(pQuery->precision); + (*pRsp)->precision = htons(pQueryAttr->precision); if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { doDumpQueryResult(pQInfo, (*pRsp)->data); } else { @@ -344,10 +348,10 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
*continueExec = false; (*pRsp)->completed = 1; // notify no more result to client - qDebug("QInfo:%"PRIu64" no more results to retrieve", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" no more results to retrieve", pQInfo->qId); } else { *continueExec = true; - qDebug("QInfo:%"PRIu64" has more results to retrieve", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" has more results to retrieve", pQInfo->qId); } // the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS @@ -373,7 +377,7 @@ int32_t qKillQuery(qinfo_t qinfo) { return TSDB_CODE_QRY_INVALID_QHANDLE; } - qDebug("QInfo:%"PRIu64" query killed", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" query killed", pQInfo->qId); setQueryKilled(pQInfo); // Wait for the query executing thread being stopped/ @@ -401,7 +405,7 @@ void qDestroyQueryInfo(qinfo_t qHandle) { return; } - qDebug("QInfo:%"PRIu64" query completed", pQInfo->qId); + qDebug("QInfo:0x%"PRIx64" query completed", pQInfo->qId); queryCostStatis(pQInfo); // print the query cost summary freeQInfo(pQInfo); } @@ -484,7 +488,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { - qError("QInfo:%"PRIu64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo); + qError("QInfo:0x%"PRIx64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo); terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } @@ -492,7 +496,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) { pthread_mutex_lock(&pQueryMgmt->lock); if (pQueryMgmt->closed) { pthread_mutex_unlock(&pQueryMgmt->lock); - qError("QInfo:%"PRIu64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo); + qError("QInfo:0x%"PRIx64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo); terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } else { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 3c22bd85cc31b8bb6f8f39585b41f0fed1300b16..f3929da022391c510bdfff203adcd5c7c5f03232 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -23,7 +23,6 @@ ** input grammar file: */ #include -#include /************ Begin %include sections from the grammar ************************/ #include @@ -77,10 +76,8 @@ ** zero the stack is dynamically sized using realloc() ** ParseARG_SDECL A static variable declaration for the %extra_argument ** ParseARG_PDECL A parameter declaration for the %extra_argument -** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter ** ParseARG_STORE Code to store %extra_argument into yypParser ** ParseARG_FETCH Code to extract %extra_argument from yypParser -** ParseCTX_* As ParseARG_ except for %extra_context ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states. 
@@ -100,57 +97,48 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 262 +#define YYNOCODE 264 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SLimitVal yy18; - SFromInfo* yy70; - SSessionWindowVal yy87; - SCreateDbInfo yy94; - int yy116; - SSubclauseInfo* yy141; - tSqlExpr* yy170; - SCreateTableSql* yy194; - tVariant yy218; - SIntervalVal yy220; - SCreatedTableInfo yy252; - SQuerySqlNode* yy254; - SCreateAcctInfo yy419; - SArray* yy429; - TAOS_FIELD yy451; - int64_t yy481; + SCreateTableSql* yy14; + int yy20; + SSqlNode* yy116; + tSqlExpr* yy118; + SArray* yy159; + SIntervalVal yy184; + SCreatedTableInfo yy206; + SRelationInfo* yy236; + SSessionWindowVal yy249; + int64_t yy317; + SCreateDbInfo yy322; + SCreateAcctInfo yy351; + TAOS_FIELD yy407; + SLimitVal yy440; + tVariant yy488; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 #endif #define ParseARG_SDECL SSqlInfo* pInfo; #define ParseARG_PDECL ,SSqlInfo* pInfo -#define ParseARG_PARAM ,pInfo -#define ParseARG_FETCH SSqlInfo* pInfo=yypParser->pInfo; -#define ParseARG_STORE yypParser->pInfo=pInfo; -#define ParseCTX_SDECL -#define ParseCTX_PDECL -#define ParseCTX_PARAM -#define ParseCTX_FETCH -#define ParseCTX_STORE +#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo +#define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 #define YYNSTATE 315 -#define YYNRULE 267 -#define YYNRULE_WITH_ACTION 267 +#define YYNRULE 269 #define YYNTOKEN 187 #define YY_MAX_SHIFT 314 -#define YY_MIN_SHIFTREDUCE 506 -#define YY_MAX_SHIFTREDUCE 772 -#define YY_ERROR_ACTION 773 -#define YY_ACCEPT_ACTION 774 -#define YY_NO_ACTION 775 -#define YY_MIN_REDUCE 776 -#define YY_MAX_REDUCE 1042 +#define YY_MIN_SHIFTREDUCE 508 +#define YY_MAX_SHIFTREDUCE 776 +#define YY_ERROR_ACTION 777 +#define YY_ACCEPT_ACTION 778 +#define YY_NO_ACTION 779 +#define YY_MIN_REDUCE 780 +#define YY_MAX_REDUCE 1048 /************* End control #defines *******************************************/ -#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. @@ -215,260 +203,260 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (681) +#define YY_ACTTAB_COUNT (683) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 133, 553, 202, 312, 206, 140, 943, 17, 85, 554, - /* 10 */ 774, 314, 179, 47, 48, 140, 51, 52, 30, 181, - /* 20 */ 214, 41, 181, 50, 262, 55, 53, 57, 54, 1023, - /* 30 */ 922, 209, 1024, 46, 45, 185, 181, 44, 43, 42, - /* 40 */ 47, 48, 910, 51, 52, 208, 1024, 214, 41, 553, - /* 50 */ 50, 262, 55, 53, 57, 54, 934, 554, 1020, 203, - /* 60 */ 46, 45, 919, 247, 44, 43, 42, 48, 940, 51, - /* 70 */ 52, 242, 974, 214, 41, 553, 50, 262, 55, 53, - /* 80 */ 57, 54, 975, 554, 257, 278, 46, 45, 298, 225, - /* 90 */ 44, 43, 42, 507, 508, 509, 510, 511, 512, 513, - /* 100 */ 514, 515, 516, 517, 518, 519, 313, 632, 1019, 231, - /* 110 */ 70, 553, 30, 47, 48, 1018, 51, 52, 821, 554, - /* 120 */ 214, 41, 166, 50, 262, 55, 53, 57, 54, 44, - /* 130 */ 43, 42, 718, 46, 45, 288, 287, 44, 43, 42, - /* 140 */ 47, 49, 830, 51, 52, 198, 166, 214, 41, 234, - /* 150 */ 50, 262, 55, 53, 57, 54, 918, 238, 237, 227, + /* 0 */ 133, 555, 204, 312, 208, 140, 947, 226, 140, 556, + /* 10 */ 778, 314, 17, 47, 48, 140, 51, 52, 30, 181, + /* 20 */ 214, 41, 181, 50, 262, 55, 53, 57, 54, 1029, + /* 30 */ 926, 211, 1030, 46, 45, 179, 181, 44, 43, 42, + /* 40 */ 47, 48, 924, 51, 52, 210, 1030, 214, 41, 555, + /* 50 */ 50, 262, 55, 53, 57, 54, 938, 556, 185, 205, + /* 60 */ 46, 45, 923, 247, 44, 43, 42, 48, 944, 51, + /* 70 */ 52, 242, 978, 214, 41, 79, 50, 262, 55, 53, + /* 80 */ 57, 54, 979, 634, 257, 30, 46, 45, 278, 225, + /* 90 */ 44, 43, 42, 509, 510, 511, 512, 513, 514, 515, + /* 100 */ 516, 517, 518, 519, 520, 521, 313, 555, 85, 231, + /* 110 */ 70, 288, 287, 47, 48, 556, 51, 52, 298, 219, + /* 120 */ 214, 41, 555, 50, 262, 55, 53, 57, 54, 922, + /* 130 */ 556, 105, 720, 46, 45, 1026, 298, 44, 43, 42, + /* 140 */ 47, 49, 914, 51, 52, 926, 140, 214, 41, 234, + /* 150 */ 50, 262, 55, 53, 57, 54, 1025, 238, 237, 227, /* 160 */ 46, 45, 285, 284, 44, 43, 42, 23, 276, 307, /* 170 */ 306, 275, 274, 273, 305, 272, 304, 303, 302, 271, - /* 180 */ 301, 300, 882, 140, 870, 871, 872, 873, 874, 875, - /* 190 */ 876, 877, 878, 879, 880, 881, 883, 884, 51, 52, - /* 200 */ 822, 219, 214, 41, 166, 50, 262, 55, 53, 57, - /* 210 */ 54, 223, 18, 82, 25, 46, 45, 199, 226, 44, - /* 220 */ 43, 42, 213, 731, 934, 221, 722, 922, 725, 190, - /* 230 */ 728, 183, 213, 731, 140, 191, 722, 908, 725, 204, - /* 240 */ 728, 118, 117, 189, 905, 906, 29, 909, 259, 74, - /* 250 */ 78, 922, 30, 920, 210, 211, 308, 36, 261, 69, - /* 260 */ 23, 916, 307, 306, 210, 211, 61, 305, 30, 304, - /* 270 */ 303, 302, 74, 301, 300, 890, 3, 167, 888, 889, - /* 280 */ 36, 224, 922, 891, 280, 893, 894, 892, 62, 895, - /* 290 */ 896, 907, 656, 217, 12, 653, 919, 654, 84, 655, - /* 300 */ 81, 79, 241, 220, 68, 55, 53, 57, 54, 218, - /* 310 */ 197, 184, 919, 46, 45, 30, 278, 44, 43, 42, - /* 320 */ 80, 103, 108, 228, 229, 56, 263, 97, 107, 113, - /* 330 */ 116, 106, 732, 71, 671, 56, 186, 110, 730, 30, - /* 340 */ 180, 30, 732, 5, 156, 30, 699, 700, 730, 33, - /* 350 */ 155, 92, 87, 91, 729, 668, 281, 678, 105, 919, - /* 360 */ 174, 170, 24, 298, 729, 245, 172, 169, 121, 120, - /* 370 */ 119, 46, 45, 1, 154, 44, 43, 42, 720, 724, - /* 380 */ 282, 727, 286, 919, 243, 919, 290, 187, 31, 919, - /* 390 */ 311, 310, 126, 684, 212, 64, 690, 135, 691, 752, - /* 400 */ 60, 657, 20, 19, 733, 723, 642, 726, 19, 265, - /* 410 */ 31, 188, 675, 31, 721, 65, 96, 95, 194, 644, - /* 420 */ 267, 643, 
735, 60, 83, 60, 28, 14, 13, 268, - /* 430 */ 102, 101, 67, 660, 631, 661, 195, 658, 6, 659, - /* 440 */ 16, 15, 115, 114, 131, 129, 193, 178, 192, 182, - /* 450 */ 1034, 921, 985, 984, 215, 981, 980, 239, 216, 289, - /* 460 */ 132, 942, 39, 950, 952, 134, 138, 935, 246, 967, - /* 470 */ 130, 966, 917, 150, 151, 915, 299, 152, 683, 248, - /* 480 */ 886, 104, 291, 149, 147, 153, 833, 142, 932, 141, - /* 490 */ 270, 66, 205, 37, 250, 176, 34, 279, 829, 1039, - /* 500 */ 93, 255, 1038, 1036, 143, 63, 58, 157, 283, 1033, - /* 510 */ 99, 1032, 260, 1030, 158, 851, 256, 35, 258, 32, - /* 520 */ 38, 177, 818, 109, 254, 816, 111, 112, 252, 814, - /* 530 */ 813, 230, 168, 811, 810, 809, 808, 807, 806, 171, - /* 540 */ 173, 803, 801, 799, 797, 795, 175, 249, 244, 72, - /* 550 */ 75, 251, 40, 968, 292, 293, 294, 295, 296, 200, - /* 560 */ 297, 222, 269, 309, 772, 233, 232, 771, 88, 201, - /* 570 */ 235, 196, 89, 236, 770, 758, 757, 240, 245, 8, - /* 580 */ 264, 73, 663, 136, 812, 161, 165, 685, 852, 159, - /* 590 */ 160, 162, 164, 163, 122, 123, 805, 76, 124, 804, - /* 600 */ 4, 688, 137, 125, 796, 77, 146, 144, 148, 145, - /* 610 */ 207, 2, 898, 253, 26, 692, 139, 9, 10, 734, - /* 620 */ 27, 7, 11, 21, 736, 22, 86, 266, 595, 591, - /* 630 */ 84, 589, 588, 587, 584, 557, 277, 90, 94, 31, - /* 640 */ 634, 59, 633, 630, 579, 98, 100, 577, 569, 575, - /* 650 */ 571, 573, 567, 565, 598, 597, 596, 594, 593, 592, - /* 660 */ 590, 586, 585, 60, 555, 523, 521, 776, 775, 775, - /* 670 */ 775, 775, 775, 775, 775, 775, 775, 775, 775, 127, - /* 680 */ 128, + /* 180 */ 301, 300, 886, 30, 874, 875, 876, 877, 878, 879, + /* 190 */ 880, 881, 882, 883, 884, 885, 887, 888, 51, 52, + /* 200 */ 825, 1024, 214, 41, 166, 50, 262, 55, 53, 57, + /* 210 */ 54, 259, 18, 78, 82, 46, 45, 61, 223, 44, + /* 220 */ 43, 42, 213, 735, 217, 25, 724, 923, 727, 190, + /* 230 */ 730, 221, 213, 735, 198, 191, 724, 912, 727, 62, + /* 240 */ 730, 118, 117, 189, 69, 909, 910, 29, 913, 44, + /* 250 */ 43, 42, 30, 74, 200, 201, 308, 926, 261, 30, + /* 260 */ 23, 36, 307, 306, 200, 201, 938, 305, 30, 304, + /* 270 */ 303, 302, 74, 301, 300, 894, 911, 199, 892, 893, + /* 280 */ 36, 206, 926, 895, 920, 897, 898, 896, 224, 899, + /* 290 */ 900, 280, 658, 218, 834, 655, 923, 656, 166, 657, + /* 300 */ 281, 673, 241, 923, 68, 55, 53, 57, 54, 282, + /* 310 */ 197, 263, 923, 46, 45, 30, 278, 44, 43, 42, + /* 320 */ 80, 103, 108, 228, 229, 56, 220, 97, 107, 113, + /* 330 */ 116, 106, 736, 71, 726, 56, 729, 110, 732, 30, + /* 340 */ 1, 154, 736, 5, 156, 725, 183, 728, 732, 33, + /* 350 */ 155, 92, 87, 91, 731, 680, 286, 184, 826, 923, + /* 360 */ 174, 170, 166, 245, 731, 212, 172, 169, 121, 120, + /* 370 */ 119, 46, 45, 3, 167, 44, 43, 42, 12, 677, + /* 380 */ 290, 722, 84, 923, 81, 670, 311, 310, 126, 701, + /* 390 */ 702, 243, 24, 686, 692, 31, 693, 135, 60, 756, + /* 400 */ 20, 659, 737, 19, 64, 186, 19, 739, 644, 6, + /* 410 */ 180, 265, 31, 187, 646, 31, 267, 723, 60, 645, + /* 420 */ 83, 188, 28, 60, 65, 268, 662, 67, 663, 633, + /* 430 */ 96, 95, 660, 194, 661, 115, 114, 14, 13, 102, + /* 440 */ 101, 195, 16, 15, 131, 129, 733, 193, 178, 192, + /* 450 */ 182, 1040, 925, 989, 988, 215, 985, 734, 239, 984, + /* 460 */ 216, 289, 132, 946, 39, 971, 954, 970, 956, 939, + /* 470 */ 246, 130, 248, 134, 138, 921, 150, 244, 151, 207, + /* 480 */ 250, 299, 685, 149, 919, 255, 142, 936, 143, 141, + /* 490 */ 144, 152, 256, 153, 260, 258, 66, 145, 837, 270, + /* 500 */ 63, 37, 58, 176, 34, 254, 279, 833, 1045, 252, + /* 510 */ 93, 1044, 1042, 249, 147, 157, 
283, 1039, 99, 1038, + /* 520 */ 146, 1036, 158, 855, 35, 32, 38, 177, 822, 40, + /* 530 */ 109, 104, 820, 111, 112, 818, 817, 230, 168, 815, + /* 540 */ 814, 813, 812, 811, 810, 171, 173, 807, 805, 803, + /* 550 */ 291, 801, 799, 175, 292, 72, 75, 293, 251, 972, + /* 560 */ 294, 295, 296, 297, 309, 776, 202, 232, 222, 269, + /* 570 */ 233, 775, 236, 235, 774, 761, 203, 762, 88, 196, + /* 580 */ 89, 240, 245, 264, 8, 73, 76, 665, 687, 690, + /* 590 */ 816, 161, 136, 122, 856, 159, 164, 160, 162, 163, + /* 600 */ 165, 123, 809, 2, 890, 124, 808, 4, 125, 800, + /* 610 */ 137, 209, 77, 148, 253, 26, 694, 139, 9, 902, + /* 620 */ 10, 27, 738, 7, 11, 740, 21, 22, 266, 86, + /* 630 */ 597, 593, 84, 591, 590, 589, 586, 559, 277, 90, + /* 640 */ 31, 94, 98, 59, 100, 636, 635, 632, 581, 579, + /* 650 */ 571, 577, 573, 575, 569, 567, 600, 599, 598, 596, + /* 660 */ 595, 594, 592, 588, 587, 60, 557, 525, 523, 780, + /* 670 */ 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, + /* 680 */ 779, 127, 128, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 190, 1, 189, 190, 209, 190, 190, 251, 196, 9, - /* 10 */ 187, 188, 251, 13, 14, 190, 16, 17, 190, 251, - /* 20 */ 20, 21, 251, 23, 24, 25, 26, 27, 28, 261, - /* 30 */ 235, 260, 261, 33, 34, 251, 251, 37, 38, 39, - /* 40 */ 13, 14, 230, 16, 17, 260, 261, 20, 21, 1, - /* 50 */ 23, 24, 25, 26, 27, 28, 233, 9, 251, 231, - /* 60 */ 33, 34, 234, 253, 37, 38, 39, 14, 252, 16, - /* 70 */ 17, 248, 257, 20, 21, 1, 23, 24, 25, 26, - /* 80 */ 27, 28, 257, 9, 259, 79, 33, 34, 81, 67, + /* 0 */ 191, 1, 190, 191, 210, 191, 191, 191, 191, 9, + /* 10 */ 188, 189, 252, 13, 14, 191, 16, 17, 191, 252, + /* 20 */ 20, 21, 252, 23, 24, 25, 26, 27, 28, 262, + /* 30 */ 236, 261, 262, 33, 34, 252, 252, 37, 38, 39, + /* 40 */ 13, 14, 226, 16, 17, 261, 262, 20, 21, 1, + /* 50 */ 23, 24, 25, 26, 27, 28, 234, 9, 252, 232, + /* 60 */ 33, 34, 235, 254, 37, 38, 39, 14, 253, 16, + /* 70 */ 17, 249, 258, 20, 21, 258, 23, 24, 25, 26, + /* 80 */ 27, 28, 258, 5, 260, 191, 33, 34, 79, 67, /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, - /* 100 */ 52, 53, 54, 55, 56, 57, 58, 5, 251, 61, - /* 110 */ 110, 1, 190, 13, 14, 251, 16, 17, 195, 9, - /* 120 */ 20, 21, 199, 23, 24, 25, 26, 27, 28, 37, - /* 130 */ 38, 39, 105, 33, 34, 33, 34, 37, 38, 39, - /* 140 */ 13, 14, 195, 16, 17, 251, 199, 20, 21, 135, - /* 150 */ 23, 24, 25, 26, 27, 28, 234, 143, 144, 137, + /* 100 */ 52, 53, 54, 55, 56, 57, 58, 1, 197, 61, + /* 110 */ 110, 33, 34, 13, 14, 9, 16, 17, 81, 210, + /* 120 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 235, + /* 130 */ 9, 76, 105, 33, 34, 252, 81, 37, 38, 39, + /* 140 */ 13, 14, 231, 16, 17, 236, 191, 20, 21, 135, + /* 150 */ 23, 24, 25, 26, 27, 28, 252, 143, 144, 137, /* 160 */ 33, 34, 140, 141, 37, 38, 39, 88, 89, 90, /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - /* 180 */ 101, 102, 208, 190, 210, 211, 212, 213, 214, 215, - /* 190 */ 216, 217, 218, 219, 220, 221, 222, 223, 16, 17, - /* 200 */ 195, 209, 20, 21, 199, 23, 24, 25, 26, 27, - /* 210 */ 28, 67, 44, 196, 104, 33, 34, 251, 190, 37, - /* 220 */ 38, 39, 1, 2, 233, 209, 5, 235, 7, 61, - /* 230 */ 9, 251, 1, 2, 190, 67, 5, 0, 7, 248, - /* 240 */ 9, 73, 74, 75, 227, 228, 229, 230, 255, 104, - /* 250 */ 257, 235, 190, 225, 33, 34, 209, 112, 37, 196, - /* 260 */ 88, 190, 90, 91, 33, 34, 109, 95, 190, 97, - /* 270 */ 98, 99, 104, 101, 102, 208, 193, 194, 211, 212, - /* 280 */ 112, 137, 235, 216, 140, 218, 219, 220, 131, 222, - /* 290 */ 223, 228, 2, 231, 104, 5, 234, 7, 108, 9, - /* 300 */ 110, 257, 134, 232, 136, 25, 26, 27, 
28, 231, - /* 310 */ 142, 251, 234, 33, 34, 190, 79, 37, 38, 39, - /* 320 */ 236, 62, 63, 33, 34, 104, 15, 68, 69, 70, - /* 330 */ 71, 72, 111, 249, 37, 104, 251, 78, 117, 190, - /* 340 */ 251, 190, 111, 62, 63, 190, 124, 125, 117, 68, - /* 350 */ 69, 70, 71, 72, 133, 109, 231, 105, 76, 234, - /* 360 */ 62, 63, 116, 81, 133, 113, 68, 69, 70, 71, - /* 370 */ 72, 33, 34, 197, 198, 37, 38, 39, 1, 5, - /* 380 */ 231, 7, 231, 234, 105, 234, 231, 251, 109, 234, - /* 390 */ 64, 65, 66, 105, 60, 109, 105, 109, 105, 105, - /* 400 */ 109, 111, 109, 109, 105, 5, 105, 7, 109, 105, - /* 410 */ 109, 251, 115, 109, 37, 129, 138, 139, 251, 105, - /* 420 */ 105, 105, 111, 109, 109, 109, 104, 138, 139, 107, - /* 430 */ 138, 139, 104, 5, 106, 7, 251, 5, 104, 7, - /* 440 */ 138, 139, 76, 77, 62, 63, 251, 251, 251, 251, - /* 450 */ 235, 235, 226, 226, 226, 226, 226, 190, 226, 226, - /* 460 */ 190, 190, 250, 190, 190, 190, 190, 233, 233, 258, - /* 470 */ 60, 258, 233, 237, 190, 190, 103, 190, 117, 254, - /* 480 */ 224, 87, 86, 238, 240, 190, 190, 245, 247, 246, - /* 490 */ 190, 128, 254, 190, 254, 190, 190, 190, 190, 190, - /* 500 */ 190, 254, 190, 190, 244, 130, 127, 190, 190, 190, - /* 510 */ 190, 190, 122, 190, 190, 190, 121, 190, 126, 190, - /* 520 */ 190, 190, 190, 190, 120, 190, 190, 190, 119, 190, - /* 530 */ 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, - /* 540 */ 190, 190, 190, 190, 190, 190, 190, 118, 191, 191, - /* 550 */ 191, 191, 132, 191, 50, 83, 85, 54, 84, 191, - /* 560 */ 82, 191, 191, 79, 5, 5, 145, 5, 196, 191, - /* 570 */ 145, 191, 196, 5, 5, 90, 89, 135, 113, 104, - /* 580 */ 107, 114, 105, 104, 191, 201, 200, 105, 207, 206, - /* 590 */ 205, 204, 203, 202, 192, 192, 191, 109, 192, 191, - /* 600 */ 193, 105, 109, 192, 191, 104, 241, 243, 239, 242, - /* 610 */ 1, 197, 224, 104, 109, 105, 104, 123, 123, 105, - /* 620 */ 109, 104, 104, 104, 111, 104, 76, 107, 9, 5, - /* 630 */ 108, 5, 5, 5, 5, 80, 15, 76, 139, 109, - /* 640 */ 5, 16, 5, 105, 5, 139, 139, 5, 5, 5, + /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, + /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, + /* 200 */ 196, 252, 20, 21, 200, 23, 24, 25, 26, 27, + /* 210 */ 28, 256, 44, 258, 197, 33, 34, 109, 67, 37, + /* 220 */ 38, 39, 1, 2, 232, 104, 5, 235, 7, 61, + /* 230 */ 9, 210, 1, 2, 252, 67, 5, 0, 7, 131, + /* 240 */ 9, 73, 74, 75, 197, 228, 229, 230, 231, 37, + /* 250 */ 38, 39, 191, 104, 33, 34, 210, 236, 37, 191, + /* 260 */ 88, 112, 90, 91, 33, 34, 234, 95, 191, 97, + /* 270 */ 98, 99, 104, 101, 102, 209, 229, 252, 212, 213, + /* 280 */ 112, 249, 236, 217, 191, 219, 220, 221, 137, 223, + /* 290 */ 224, 140, 2, 232, 196, 5, 235, 7, 200, 9, + /* 300 */ 232, 37, 134, 235, 136, 25, 26, 27, 28, 232, + /* 310 */ 142, 15, 235, 33, 34, 191, 79, 37, 38, 39, + /* 320 */ 237, 62, 63, 33, 34, 104, 233, 68, 69, 70, + /* 330 */ 71, 72, 111, 250, 5, 104, 7, 78, 117, 191, + /* 340 */ 198, 199, 111, 62, 63, 5, 252, 7, 117, 68, + /* 350 */ 69, 70, 71, 72, 133, 105, 232, 252, 196, 235, + /* 360 */ 62, 63, 200, 113, 133, 60, 68, 69, 70, 71, + /* 370 */ 72, 33, 34, 194, 195, 37, 38, 39, 104, 115, + /* 380 */ 232, 1, 108, 235, 110, 109, 64, 65, 66, 124, + /* 390 */ 125, 105, 116, 105, 105, 109, 105, 109, 109, 105, + /* 400 */ 109, 111, 105, 109, 109, 252, 109, 111, 105, 104, + /* 410 */ 252, 105, 109, 252, 105, 109, 105, 37, 109, 105, + /* 420 */ 109, 252, 104, 109, 129, 107, 5, 104, 7, 106, + /* 430 */ 138, 139, 5, 252, 7, 76, 77, 138, 139, 138, + /* 440 */ 139, 252, 138, 139, 62, 63, 117, 252, 252, 252, + /* 450 */ 252, 
236, 236, 227, 227, 227, 227, 117, 191, 227, + /* 460 */ 227, 227, 191, 191, 251, 259, 191, 259, 191, 234, + /* 470 */ 234, 60, 255, 191, 191, 234, 238, 192, 191, 255, + /* 480 */ 255, 103, 117, 239, 191, 255, 246, 248, 245, 247, + /* 490 */ 244, 191, 121, 191, 122, 126, 128, 243, 191, 191, + /* 500 */ 130, 191, 127, 191, 191, 120, 191, 191, 191, 119, + /* 510 */ 191, 191, 191, 118, 241, 191, 191, 191, 191, 191, + /* 520 */ 242, 191, 191, 191, 191, 191, 191, 191, 191, 132, + /* 530 */ 191, 87, 191, 191, 191, 191, 191, 191, 191, 191, + /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 550 */ 86, 191, 191, 191, 50, 192, 192, 83, 192, 192, + /* 560 */ 85, 54, 84, 82, 79, 5, 192, 145, 192, 192, + /* 570 */ 5, 5, 5, 145, 5, 89, 192, 90, 197, 192, + /* 580 */ 197, 135, 113, 107, 104, 114, 109, 105, 105, 105, + /* 590 */ 192, 202, 104, 193, 208, 207, 204, 206, 205, 203, + /* 600 */ 201, 193, 192, 198, 225, 193, 192, 194, 193, 192, + /* 610 */ 109, 1, 104, 240, 104, 109, 105, 104, 123, 225, + /* 620 */ 123, 109, 105, 104, 104, 111, 104, 104, 107, 76, + /* 630 */ 9, 5, 108, 5, 5, 5, 5, 80, 15, 76, + /* 640 */ 109, 139, 139, 16, 139, 5, 5, 105, 5, 5, /* 650 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 660 */ 5, 5, 5, 109, 80, 60, 59, 0, 262, 262, - /* 670 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 21, - /* 680 */ 21, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 690 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 700 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 710 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 720 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 730 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 740 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 750 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 760 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 770 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 780 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 790 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 800 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 810 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 820 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 830 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 840 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 850 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 860 */ 262, 262, 262, 262, 262, 262, 262, 262, + /* 660 */ 5, 5, 5, 5, 5, 109, 80, 60, 59, 0, + /* 670 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 680 */ 263, 21, 21, 263, 263, 263, 263, 263, 263, 263, + /* 690 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 700 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 710 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 720 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 730 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 740 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 750 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 760 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 770 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 780 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 790 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 800 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 810 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 820 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 830 */ 263, 263, 
263, 263, 263, 263, 263, 263, 263, 263, + /* 840 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 850 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 860 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, }; #define YY_SHIFT_COUNT (314) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (667) +#define YY_SHIFT_MAX (669) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 168, 79, 79, 172, 172, 6, 221, 231, 74, 74, - /* 10 */ 74, 74, 74, 74, 74, 74, 74, 0, 48, 231, - /* 20 */ 290, 290, 290, 290, 110, 145, 74, 74, 74, 237, - /* 30 */ 74, 74, 282, 6, 7, 7, 681, 681, 681, 231, + /* 0 */ 168, 79, 79, 172, 172, 9, 221, 231, 106, 106, + /* 10 */ 106, 106, 106, 106, 106, 106, 106, 0, 48, 231, + /* 20 */ 290, 290, 290, 290, 121, 149, 106, 106, 106, 237, + /* 30 */ 106, 106, 55, 9, 37, 37, 683, 683, 683, 231, /* 40 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, /* 50 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 290, - /* 60 */ 290, 102, 102, 102, 102, 102, 102, 102, 74, 74, - /* 70 */ 74, 297, 74, 145, 145, 74, 74, 74, 222, 222, - /* 80 */ 246, 145, 74, 74, 74, 74, 74, 74, 74, 74, - /* 90 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 100 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 110 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 120 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 130 */ 74, 74, 410, 410, 410, 361, 361, 361, 410, 361, - /* 140 */ 410, 363, 375, 379, 390, 392, 395, 404, 409, 429, - /* 150 */ 420, 410, 410, 410, 373, 6, 6, 410, 410, 394, - /* 160 */ 396, 504, 472, 471, 503, 474, 478, 373, 410, 484, - /* 170 */ 484, 410, 484, 410, 484, 410, 681, 681, 27, 100, + /* 60 */ 290, 78, 78, 78, 78, 78, 78, 78, 106, 106, + /* 70 */ 106, 264, 106, 149, 149, 106, 106, 106, 265, 265, + /* 80 */ 276, 149, 106, 106, 106, 106, 106, 106, 106, 106, + /* 90 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 100 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 110 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 120 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 130 */ 106, 106, 411, 411, 411, 365, 365, 365, 411, 365, + /* 140 */ 411, 368, 370, 375, 372, 369, 371, 385, 390, 395, + /* 150 */ 397, 411, 411, 411, 378, 9, 9, 411, 411, 444, + /* 160 */ 464, 504, 474, 475, 507, 478, 481, 378, 411, 485, + /* 170 */ 485, 411, 485, 411, 485, 411, 683, 683, 27, 100, /* 180 */ 127, 100, 100, 53, 182, 280, 280, 280, 280, 259, - /* 190 */ 281, 298, 338, 338, 338, 338, 22, 14, 92, 92, - /* 200 */ 190, 144, 326, 279, 252, 288, 291, 293, 294, 299, - /* 210 */ 374, 400, 377, 334, 311, 157, 286, 301, 304, 314, - /* 220 */ 315, 316, 322, 278, 289, 292, 328, 302, 428, 432, - /* 230 */ 366, 382, 559, 421, 560, 562, 425, 568, 569, 485, - /* 240 */ 487, 442, 465, 473, 475, 467, 477, 488, 482, 479, - /* 250 */ 496, 493, 501, 609, 509, 510, 512, 505, 494, 511, - /* 260 */ 495, 514, 517, 513, 518, 473, 519, 520, 521, 522, - /* 270 */ 550, 619, 624, 626, 627, 628, 629, 555, 621, 561, - /* 280 */ 499, 530, 530, 625, 506, 507, 530, 635, 637, 538, - /* 290 */ 530, 639, 642, 643, 644, 645, 646, 647, 648, 649, - /* 300 */ 650, 651, 652, 653, 654, 655, 656, 657, 554, 584, - /* 310 */ 658, 659, 605, 607, 667, + /* 190 */ 281, 298, 338, 338, 338, 338, 22, 14, 212, 212, + /* 200 */ 329, 340, 274, 151, 322, 286, 250, 288, 289, 291, + /* 210 */ 294, 297, 380, 305, 296, 108, 295, 303, 306, 309, + /* 220 */ 311, 314, 318, 292, 299, 301, 323, 304, 421, 427, + /* 230 */ 359, 382, 560, 422, 565, 566, 428, 567, 569, 487, + /* 240 */ 486, 446, 469, 476, 480, 471, 482, 
477, 483, 488, + /* 250 */ 484, 501, 508, 610, 510, 511, 513, 506, 495, 512, + /* 260 */ 497, 517, 519, 514, 520, 476, 522, 521, 523, 524, + /* 270 */ 553, 621, 626, 628, 629, 630, 631, 557, 623, 563, + /* 280 */ 502, 531, 531, 627, 503, 505, 531, 640, 641, 542, + /* 290 */ 531, 643, 644, 645, 646, 647, 648, 649, 650, 651, + /* 300 */ 652, 653, 654, 655, 656, 657, 658, 659, 556, 586, + /* 310 */ 660, 661, 607, 609, 669, }; #define YY_REDUCE_COUNT (177) -#define YY_REDUCE_MIN (-244) -#define YY_REDUCE_MAX (414) +#define YY_REDUCE_MIN (-240) +#define YY_REDUCE_MAX (417) static const short yy_reduce_ofst[] = { - /* 0 */ -177, -26, -26, 67, 67, 17, -229, -215, -172, -175, - /* 10 */ -7, 62, 78, 125, 149, 151, 155, -184, -187, -232, - /* 20 */ -205, -8, 16, 47, -190, -9, -185, 44, 71, -188, - /* 30 */ 28, -78, -77, 63, -53, 5, 84, 176, 83, -244, - /* 40 */ -239, -216, -193, -143, -136, -106, -34, -20, 60, 85, - /* 50 */ 89, 136, 160, 167, 185, 195, 196, 197, 198, 215, - /* 60 */ 216, 226, 227, 228, 229, 230, 232, 233, 267, 270, - /* 70 */ 271, 212, 273, 234, 235, 274, 275, 276, 211, 213, - /* 80 */ 236, 239, 284, 285, 287, 295, 296, 300, 303, 305, - /* 90 */ 306, 307, 308, 309, 310, 312, 313, 317, 318, 319, - /* 100 */ 320, 321, 323, 324, 325, 327, 329, 330, 331, 332, - /* 110 */ 333, 335, 336, 337, 339, 340, 341, 342, 343, 344, - /* 120 */ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, - /* 130 */ 355, 356, 357, 358, 359, 225, 238, 240, 360, 247, - /* 140 */ 362, 241, 243, 242, 260, 364, 367, 365, 244, 369, - /* 150 */ 245, 368, 370, 371, 256, 372, 376, 378, 380, 381, - /* 160 */ 383, 385, 384, 387, 391, 389, 386, 388, 393, 402, - /* 170 */ 403, 405, 406, 408, 411, 413, 414, 407, + /* 0 */ -178, -27, -27, 66, 66, 17, -230, -216, -173, -176, + /* 10 */ -45, -8, 61, 68, 77, 124, 148, -185, -188, -233, + /* 20 */ -206, -91, 21, 46, -191, 32, -186, -183, 93, -89, + /* 30 */ -184, -106, 4, 47, 98, 162, 83, 142, 179, -240, + /* 40 */ -217, -194, -117, -96, -51, -18, 25, 94, 105, 153, + /* 50 */ 158, 161, 169, 181, 189, 195, 196, 197, 198, 215, + /* 60 */ 216, 226, 227, 228, 229, 232, 233, 234, 267, 271, + /* 70 */ 272, 213, 275, 235, 236, 277, 282, 283, 206, 208, + /* 80 */ 238, 241, 287, 293, 300, 302, 307, 308, 310, 312, + /* 90 */ 313, 315, 316, 317, 319, 320, 321, 324, 325, 326, + /* 100 */ 327, 328, 330, 331, 332, 333, 334, 335, 336, 337, + /* 110 */ 339, 341, 342, 343, 344, 345, 346, 347, 348, 349, + /* 120 */ 350, 351, 352, 353, 354, 355, 356, 357, 358, 360, + /* 130 */ 361, 362, 285, 363, 364, 217, 224, 225, 366, 230, + /* 140 */ 367, 239, 242, 240, 243, 246, 254, 278, 273, 373, + /* 150 */ 244, 374, 376, 377, 379, 381, 383, 384, 387, 386, + /* 160 */ 388, 391, 389, 393, 396, 392, 399, 394, 398, 400, + /* 170 */ 408, 410, 412, 414, 415, 417, 405, 413, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 773, 885, 831, 897, 819, 828, 1026, 1026, 773, 773, - /* 10 */ 773, 773, 773, 773, 773, 773, 773, 944, 792, 1026, - /* 20 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 828, - /* 30 */ 773, 773, 834, 828, 834, 834, 939, 869, 887, 773, - /* 40 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 50 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 60 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 70 */ 773, 946, 949, 773, 773, 951, 773, 773, 971, 971, - /* 80 */ 937, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 90 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 100 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 817, - /* 110 */ 773, 815, 773, 773, 
773, 773, 773, 773, 773, 773, - /* 120 */ 773, 773, 773, 773, 773, 773, 802, 773, 773, 773, - /* 130 */ 773, 773, 794, 794, 794, 773, 773, 773, 794, 773, - /* 140 */ 794, 978, 982, 976, 964, 972, 963, 959, 957, 956, - /* 150 */ 986, 794, 794, 794, 832, 828, 828, 794, 794, 850, - /* 160 */ 848, 846, 838, 844, 840, 842, 836, 820, 794, 826, - /* 170 */ 826, 794, 826, 794, 826, 794, 869, 887, 773, 987, - /* 180 */ 773, 1025, 977, 1015, 1014, 1021, 1013, 1012, 1011, 773, - /* 190 */ 773, 773, 1007, 1008, 1010, 1009, 773, 773, 1017, 1016, - /* 200 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 210 */ 773, 773, 773, 989, 773, 983, 979, 773, 773, 773, - /* 220 */ 773, 773, 773, 773, 773, 773, 899, 773, 773, 773, - /* 230 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 240 */ 773, 773, 936, 773, 773, 773, 773, 947, 773, 773, - /* 250 */ 773, 773, 773, 773, 773, 773, 773, 973, 773, 965, - /* 260 */ 773, 773, 773, 773, 773, 911, 773, 773, 773, 773, - /* 270 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 280 */ 773, 1037, 1035, 773, 773, 773, 1031, 773, 773, 773, - /* 290 */ 1029, 773, 773, 773, 773, 773, 773, 773, 773, 773, - /* 300 */ 773, 773, 773, 773, 773, 773, 773, 773, 853, 773, - /* 310 */ 800, 798, 773, 790, 773, + /* 0 */ 777, 889, 835, 901, 823, 832, 1032, 1032, 777, 777, + /* 10 */ 777, 777, 777, 777, 777, 777, 777, 948, 796, 1032, + /* 20 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 832, + /* 30 */ 777, 777, 838, 832, 838, 838, 943, 873, 891, 777, + /* 40 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 50 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 60 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 70 */ 777, 950, 953, 777, 777, 955, 777, 777, 975, 975, + /* 80 */ 941, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 90 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 100 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 821, + /* 110 */ 777, 819, 777, 777, 777, 777, 777, 777, 777, 777, + /* 120 */ 777, 777, 777, 777, 777, 777, 806, 777, 777, 777, + /* 130 */ 777, 777, 798, 798, 798, 777, 777, 777, 798, 777, + /* 140 */ 798, 982, 986, 980, 968, 976, 967, 963, 961, 960, + /* 150 */ 990, 798, 798, 798, 836, 832, 832, 798, 798, 854, + /* 160 */ 852, 850, 842, 848, 844, 846, 840, 824, 798, 830, + /* 170 */ 830, 798, 830, 798, 830, 798, 873, 891, 777, 991, + /* 180 */ 777, 1031, 981, 1021, 1020, 1027, 1019, 1018, 1017, 777, + /* 190 */ 777, 777, 1013, 1014, 1016, 1015, 777, 777, 1023, 1022, + /* 200 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 210 */ 777, 777, 777, 993, 777, 987, 983, 777, 777, 777, + /* 220 */ 777, 777, 777, 777, 777, 777, 903, 777, 777, 777, + /* 230 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 240 */ 777, 777, 940, 777, 777, 777, 777, 951, 777, 777, + /* 250 */ 777, 777, 777, 777, 777, 777, 777, 977, 777, 969, + /* 260 */ 777, 777, 777, 777, 777, 915, 777, 777, 777, 777, + /* 270 */ 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 280 */ 777, 1043, 1041, 777, 777, 777, 1037, 777, 777, 777, + /* 290 */ 1035, 777, 777, 777, 777, 777, 777, 777, 777, 777, + /* 300 */ 777, 777, 777, 777, 777, 777, 777, 777, 857, 777, + /* 310 */ 804, 802, 777, 794, 777, }; /********** End of lemon-generated parsing tables *****************************/ @@ -714,7 +702,6 @@ struct yyParser { int yyerrcnt; /* Shifts left before out of the error */ #endif ParseARG_SDECL /* A place to hold %extra_argument */ - ParseCTX_SDECL /* A place to hold %extra_context */ #if YYSTACKDEPTH<=0 int yystksz; 
/* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ @@ -949,81 +936,82 @@ static const char *const yyTokenName[] = { /* 184 */ "INSERT", /* 185 */ "INTO", /* 186 */ "VALUES", - /* 187 */ "program", - /* 188 */ "cmd", - /* 189 */ "dbPrefix", - /* 190 */ "ids", - /* 191 */ "cpxName", - /* 192 */ "ifexists", - /* 193 */ "alter_db_optr", - /* 194 */ "alter_topic_optr", - /* 195 */ "acct_optr", - /* 196 */ "ifnotexists", - /* 197 */ "db_optr", - /* 198 */ "topic_optr", - /* 199 */ "pps", - /* 200 */ "tseries", - /* 201 */ "dbs", - /* 202 */ "streams", - /* 203 */ "storage", - /* 204 */ "qtime", - /* 205 */ "users", - /* 206 */ "conns", - /* 207 */ "state", - /* 208 */ "keep", - /* 209 */ "tagitemlist", - /* 210 */ "cache", - /* 211 */ "replica", - /* 212 */ "quorum", - /* 213 */ "days", - /* 214 */ "minrows", - /* 215 */ "maxrows", - /* 216 */ "blocks", - /* 217 */ "ctime", - /* 218 */ "wal", - /* 219 */ "fsync", - /* 220 */ "comp", - /* 221 */ "prec", - /* 222 */ "update", - /* 223 */ "cachelast", - /* 224 */ "partitions", - /* 225 */ "typename", - /* 226 */ "signed", - /* 227 */ "create_table_args", - /* 228 */ "create_stable_args", - /* 229 */ "create_table_list", - /* 230 */ "create_from_stable", - /* 231 */ "columnlist", - /* 232 */ "tagNamelist", - /* 233 */ "select", - /* 234 */ "column", - /* 235 */ "tagitem", - /* 236 */ "selcollist", - /* 237 */ "from", - /* 238 */ "where_opt", - /* 239 */ "interval_opt", - /* 240 */ "session_option", - /* 241 */ "fill_opt", - /* 242 */ "sliding_opt", - /* 243 */ "groupby_opt", - /* 244 */ "orderby_opt", - /* 245 */ "having_opt", - /* 246 */ "slimit_opt", - /* 247 */ "limit_opt", - /* 248 */ "union", - /* 249 */ "sclp", - /* 250 */ "distinct", - /* 251 */ "expr", - /* 252 */ "as", - /* 253 */ "tablelist", - /* 254 */ "tmvar", - /* 255 */ "sortlist", - /* 256 */ "sortitem", - /* 257 */ "item", - /* 258 */ "sortorder", - /* 259 */ "grouplist", - /* 260 */ "exprlist", - /* 261 */ "expritem", + /* 187 */ "error", + /* 188 */ "program", + /* 189 */ "cmd", + /* 190 */ "dbPrefix", + /* 191 */ "ids", + /* 192 */ "cpxName", + /* 193 */ "ifexists", + /* 194 */ "alter_db_optr", + /* 195 */ "alter_topic_optr", + /* 196 */ "acct_optr", + /* 197 */ "ifnotexists", + /* 198 */ "db_optr", + /* 199 */ "topic_optr", + /* 200 */ "pps", + /* 201 */ "tseries", + /* 202 */ "dbs", + /* 203 */ "streams", + /* 204 */ "storage", + /* 205 */ "qtime", + /* 206 */ "users", + /* 207 */ "conns", + /* 208 */ "state", + /* 209 */ "keep", + /* 210 */ "tagitemlist", + /* 211 */ "cache", + /* 212 */ "replica", + /* 213 */ "quorum", + /* 214 */ "days", + /* 215 */ "minrows", + /* 216 */ "maxrows", + /* 217 */ "blocks", + /* 218 */ "ctime", + /* 219 */ "wal", + /* 220 */ "fsync", + /* 221 */ "comp", + /* 222 */ "prec", + /* 223 */ "update", + /* 224 */ "cachelast", + /* 225 */ "partitions", + /* 226 */ "typename", + /* 227 */ "signed", + /* 228 */ "create_table_args", + /* 229 */ "create_stable_args", + /* 230 */ "create_table_list", + /* 231 */ "create_from_stable", + /* 232 */ "columnlist", + /* 233 */ "tagNamelist", + /* 234 */ "select", + /* 235 */ "column", + /* 236 */ "tagitem", + /* 237 */ "selcollist", + /* 238 */ "from", + /* 239 */ "where_opt", + /* 240 */ "interval_opt", + /* 241 */ "session_option", + /* 242 */ "fill_opt", + /* 243 */ "sliding_opt", + /* 244 */ "groupby_opt", + /* 245 */ "orderby_opt", + /* 246 */ "having_opt", + /* 247 */ "slimit_opt", + /* 248 */ "limit_opt", + /* 249 */ "union", + /* 250 */ "sclp", + /* 251 */ "distinct", + /* 252 
*/ "expr", + /* 253 */ "as", + /* 254 */ "tablelist", + /* 255 */ "tmvar", + /* 256 */ "sortlist", + /* 257 */ "sortitem", + /* 258 */ "item", + /* 259 */ "sortorder", + /* 260 */ "grouplist", + /* 261 */ "exprlist", + /* 262 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1256,48 +1244,50 @@ static const char *const yyRuleName[] = { /* 222 */ "expr ::= STRING", /* 223 */ "expr ::= NOW", /* 224 */ "expr ::= VARIABLE", - /* 225 */ "expr ::= BOOL", - /* 226 */ "expr ::= NULL", - /* 227 */ "expr ::= ID LP exprlist RP", - /* 228 */ "expr ::= ID LP STAR RP", - /* 229 */ "expr ::= expr IS NULL", - /* 230 */ "expr ::= expr IS NOT NULL", - /* 231 */ "expr ::= expr LT expr", - /* 232 */ "expr ::= expr GT expr", - /* 233 */ "expr ::= expr LE expr", - /* 234 */ "expr ::= expr GE expr", - /* 235 */ "expr ::= expr NE expr", - /* 236 */ "expr ::= expr EQ expr", - /* 237 */ "expr ::= expr BETWEEN expr AND expr", - /* 238 */ "expr ::= expr AND expr", - /* 239 */ "expr ::= expr OR expr", - /* 240 */ "expr ::= expr PLUS expr", - /* 241 */ "expr ::= expr MINUS expr", - /* 242 */ "expr ::= expr STAR expr", - /* 243 */ "expr ::= expr SLASH expr", - /* 244 */ "expr ::= expr REM expr", - /* 245 */ "expr ::= expr LIKE expr", - /* 246 */ "expr ::= expr IN LP exprlist RP", - /* 247 */ "exprlist ::= exprlist COMMA expritem", - /* 248 */ "exprlist ::= expritem", - /* 249 */ "expritem ::= expr", - /* 250 */ "expritem ::=", - /* 251 */ "cmd ::= RESET QUERY CACHE", - /* 252 */ "cmd ::= SYNCDB ids REPLICA", - /* 253 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 254 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 255 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 256 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 257 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 258 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 259 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 260 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 261 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 262 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 263 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 264 */ "cmd ::= KILL CONNECTION INTEGER", - /* 265 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 266 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 225 */ "expr ::= PLUS VARIABLE", + /* 226 */ "expr ::= MINUS VARIABLE", + /* 227 */ "expr ::= BOOL", + /* 228 */ "expr ::= NULL", + /* 229 */ "expr ::= ID LP exprlist RP", + /* 230 */ "expr ::= ID LP STAR RP", + /* 231 */ "expr ::= expr IS NULL", + /* 232 */ "expr ::= expr IS NOT NULL", + /* 233 */ "expr ::= expr LT expr", + /* 234 */ "expr ::= expr GT expr", + /* 235 */ "expr ::= expr LE expr", + /* 236 */ "expr ::= expr GE expr", + /* 237 */ "expr ::= expr NE expr", + /* 238 */ "expr ::= expr EQ expr", + /* 239 */ "expr ::= expr BETWEEN expr AND expr", + /* 240 */ "expr ::= expr AND expr", + /* 241 */ "expr ::= expr OR expr", + /* 242 */ "expr ::= expr PLUS expr", + /* 243 */ "expr ::= expr MINUS expr", + /* 244 */ "expr ::= expr STAR expr", + /* 245 */ "expr ::= expr SLASH expr", + /* 246 */ "expr ::= expr REM expr", + /* 247 */ "expr ::= expr LIKE expr", + /* 248 */ "expr ::= expr IN LP exprlist RP", + /* 249 */ "exprlist ::= exprlist COMMA expritem", + /* 250 */ "exprlist ::= expritem", + /* 251 */ "expritem ::= expr", + /* 252 */ "expritem ::=", + /* 253 */ "cmd ::= RESET QUERY CACHE", + /* 254 */ "cmd ::= SYNCDB 
ids REPLICA", + /* 255 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 256 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 257 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 258 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 259 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 260 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 261 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 262 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 263 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 264 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 265 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 266 */ "cmd ::= KILL CONNECTION INTEGER", + /* 267 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 268 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1346,29 +1336,28 @@ static int yyGrowStack(yyParser *p){ /* Initialize a new parser that has already been allocated. */ -void ParseInit(void *yypRawParser ParseCTX_PDECL){ - yyParser *yypParser = (yyParser*)yypRawParser; - ParseCTX_STORE +void ParseInit(void *yypParser){ + yyParser *pParser = (yyParser*)yypParser; #ifdef YYTRACKMAXSTACKDEPTH - yypParser->yyhwm = 0; + pParser->yyhwm = 0; #endif #if YYSTACKDEPTH<=0 - yypParser->yytos = NULL; - yypParser->yystack = NULL; - yypParser->yystksz = 0; - if( yyGrowStack(yypParser) ){ - yypParser->yystack = &yypParser->yystk0; - yypParser->yystksz = 1; + pParser->yytos = NULL; + pParser->yystack = NULL; + pParser->yystksz = 0; + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; } #endif #ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; + pParser->yyerrcnt = -1; #endif - yypParser->yytos = yypParser->yystack; - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; #if YYSTACKDEPTH>0 - yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; + pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; #endif } @@ -1385,14 +1374,11 @@ void ParseInit(void *yypRawParser ParseCTX_PDECL){ ** A pointer to a parser. This pointer is used in subsequent calls ** to Parse and ParseFree. */ -void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ - yyParser *yypParser; - yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( yypParser ){ - ParseCTX_STORE - ParseInit(yypParser ParseCTX_PARAM); - } - return (void*)yypParser; +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ + yyParser *pParser; + pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( pParser ) ParseInit(pParser); + return pParser; } #endif /* Parse_ENGINEALWAYSONSTACK */ @@ -1409,8 +1395,7 @@ static void yy_destructor( YYCODETYPE yymajor, /* Type code for object to destroy */ YYMINORTYPE *yypminor /* The object to be destroyed */ ){ - ParseARG_FETCH - ParseCTX_FETCH + ParseARG_FETCH; switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen @@ -1423,52 +1408,58 @@ static void yy_destructor( ** inside the C code. 
*/ /********* Begin destructor definitions ***************************************/ - case 208: /* keep */ - case 209: /* tagitemlist */ - case 231: /* columnlist */ - case 232: /* tagNamelist */ - case 241: /* fill_opt */ - case 243: /* groupby_opt */ - case 244: /* orderby_opt */ - case 255: /* sortlist */ - case 259: /* grouplist */ + case 209: /* keep */ + case 210: /* tagitemlist */ + case 232: /* columnlist */ + case 233: /* tagNamelist */ + case 242: /* fill_opt */ + case 244: /* groupby_opt */ + case 245: /* orderby_opt */ + case 256: /* sortlist */ + case 260: /* grouplist */ +{ +taosArrayDestroy((yypminor->yy159)); +} + break; + case 230: /* create_table_list */ { -taosArrayDestroy((yypminor->yy429)); +destroyCreateTableSql((yypminor->yy14)); } break; - case 229: /* create_table_list */ + case 234: /* select */ { -destroyCreateTableSql((yypminor->yy194)); +destroySqlNode((yypminor->yy116)); } break; - case 233: /* select */ + case 237: /* selcollist */ + case 250: /* sclp */ + case 261: /* exprlist */ { -destroyQuerySqlNode((yypminor->yy254)); +tSqlExprListDestroy((yypminor->yy159)); } break; - case 236: /* selcollist */ - case 249: /* sclp */ - case 260: /* exprlist */ + case 238: /* from */ + case 254: /* tablelist */ { -tSqlExprListDestroy((yypminor->yy429)); +destroyRelationInfo((yypminor->yy236)); } break; - case 238: /* where_opt */ - case 245: /* having_opt */ - case 251: /* expr */ - case 261: /* expritem */ + case 239: /* where_opt */ + case 246: /* having_opt */ + case 252: /* expr */ + case 262: /* expritem */ { -tSqlExprDestroy((yypminor->yy170)); +tSqlExprDestroy((yypminor->yy118)); } break; - case 248: /* union */ + case 249: /* union */ { -destroyAllSelectClause((yypminor->yy141)); +destroyAllSqlNode((yypminor->yy159)); } break; - case 256: /* sortitem */ + case 257: /* sortitem */ { -tVariantDestroy(&(yypminor->yy218)); +tVariantDestroy(&(yypminor->yy488)); } break; /********* End destructor definitions *****************************************/ @@ -1580,12 +1571,13 @@ int ParseCoverage(FILE *out){ ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. 
*/ -static YYACTIONTYPE yy_find_shift_action( - YYCODETYPE iLookAhead, /* The look-ahead token */ - YYACTIONTYPE stateno /* Current state number */ +static unsigned int yy_find_shift_action( + yyParser *pParser, /* The parser */ + YYCODETYPE iLookAhead /* The look-ahead token */ ){ int i; - + int stateno = pParser->yytos->stateno; + if( stateno>YY_MAX_SHIFT ) return stateno; assert( stateno <= YY_SHIFT_COUNT ); #if defined(YYCOVERAGE) @@ -1593,19 +1585,15 @@ static YYACTIONTYPE yy_find_shift_action( #endif do{ i = yy_shift_ofst[stateno]; - assert( i>=0 ); - assert( i<=YY_ACTTAB_COUNT ); - assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); + assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); assert( iLookAhead!=YYNOCODE ); assert( iLookAhead < YYNTOKEN ); i += iLookAhead; - assert( i<(int)YY_NLOOKAHEAD ); if( yy_lookahead[i]!=iLookAhead ){ #ifdef YYFALLBACK YYCODETYPE iFallback; /* Fallback token */ - assert( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) ); - iFallback = yyFallback[iLookAhead]; - if( iFallback!=0 ){ + if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) + && (iFallback = yyFallback[iLookAhead])!=0 ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n", @@ -1620,8 +1608,15 @@ static YYACTIONTYPE yy_find_shift_action( #ifdef YYWILDCARD { int j = i - iLookAhead + YYWILDCARD; - assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); - if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ + if( +#if YY_SHIFT_MIN+YYWILDCARD<0 + j>=0 && +#endif +#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT + j<YY_ACTTAB_COUNT && +#endif + yy_lookahead[j]==YYWILDCARD && iLookAhead>0 + ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", @@ -1635,7 +1630,6 @@ static YYACTIONTYPE yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ - assert( i>=0 && iyytos; - yytos->stateno = yyNewState; - yytos->major = yyMajor; + yytos->stateno = (YYACTIONTYPE)yyNewState; + yytos->major = (YYCODETYPE)yyMajor; yytos->minor.yy0 = yyMinor; yyTraceShift(yypParser, yyNewState, "Shift"); } -/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side -** of that rule */ -static const YYCODETYPE yyRuleInfoLhs[] = { - 187, /* (0) program ::= cmd */ - 188, /* (1) cmd ::= SHOW DATABASES */ - 188, /* (2) cmd ::= SHOW TOPICS */ - 188, /* (3) cmd ::= SHOW MNODES */ - 188, /* (4) cmd ::= SHOW DNODES */ - 188, /* (5) cmd ::= SHOW ACCOUNTS */ - 188, /* (6) cmd ::= SHOW USERS */ - 188, /* (7) cmd ::= SHOW MODULES */ - 188, /* (8) cmd ::= SHOW QUERIES */ - 188, /* (9) cmd ::= SHOW CONNECTIONS */ - 188, /* (10) cmd ::= SHOW STREAMS */ - 188, /* (11) cmd ::= SHOW VARIABLES */ - 188, /* (12) cmd ::= SHOW SCORES */ - 188, /* (13) cmd ::= SHOW GRANTS */ - 188, /* (14) cmd ::= SHOW VNODES */ - 188, /* (15) cmd ::= SHOW VNODES IPTOKEN */ - 189, /* (16) dbPrefix ::= */ - 189, /* (17) dbPrefix ::= ids DOT */ - 191, /* (18) cpxName ::= */ - 191, /* (19) cpxName ::= DOT ids */ - 188, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - 188, /* (21) cmd ::= SHOW CREATE DATABASE ids */ - 188, /* (22) cmd ::= SHOW dbPrefix TABLES */ - 188, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - 188, /* (24) cmd ::= SHOW dbPrefix STABLES */ - 188, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - 188, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ - 188, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ - 188, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ - 188, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ - 188, /* (30) cmd ::= DROP DATABASE ifexists ids */ - 188, /* (31) cmd ::= DROP TOPIC ifexists ids */ - 188, /* (32) cmd ::= DROP DNODE ids */ - 188, /* (33) cmd ::= DROP USER ids */ - 188, /* (34) cmd ::= DROP ACCOUNT ids */ - 188, /* (35) cmd ::= USE ids */ - 188, /* (36) cmd ::= DESCRIBE ids cpxName */ - 188, /* (37) cmd ::= ALTER USER ids PASS ids */ - 188, /* (38) 
cmd ::= ALTER USER ids PRIVILEGE ids */ - 188, /* (39) cmd ::= ALTER DNODE ids ids */ - 188, /* (40) cmd ::= ALTER DNODE ids ids ids */ - 188, /* (41) cmd ::= ALTER LOCAL ids */ - 188, /* (42) cmd ::= ALTER LOCAL ids ids */ - 188, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ - 188, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ - 188, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ - 188, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - 190, /* (47) ids ::= ID */ - 190, /* (48) ids ::= STRING */ - 192, /* (49) ifexists ::= IF EXISTS */ - 192, /* (50) ifexists ::= */ - 196, /* (51) ifnotexists ::= IF NOT EXISTS */ - 196, /* (52) ifnotexists ::= */ - 188, /* (53) cmd ::= CREATE DNODE ids */ - 188, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - 188, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - 188, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - 188, /* (57) cmd ::= CREATE USER ids PASS ids */ - 199, /* (58) pps ::= */ - 199, /* (59) pps ::= PPS INTEGER */ - 200, /* (60) tseries ::= */ - 200, /* (61) tseries ::= TSERIES INTEGER */ - 201, /* (62) dbs ::= */ - 201, /* (63) dbs ::= DBS INTEGER */ - 202, /* (64) streams ::= */ - 202, /* (65) streams ::= STREAMS INTEGER */ - 203, /* (66) storage ::= */ - 203, /* (67) storage ::= STORAGE INTEGER */ - 204, /* (68) qtime ::= */ - 204, /* (69) qtime ::= QTIME INTEGER */ - 205, /* (70) users ::= */ - 205, /* (71) users ::= USERS INTEGER */ - 206, /* (72) conns ::= */ - 206, /* (73) conns ::= CONNS INTEGER */ - 207, /* (74) state ::= */ - 207, /* (75) state ::= STATE ids */ - 195, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - 208, /* (77) keep ::= KEEP tagitemlist */ - 210, /* (78) cache ::= CACHE INTEGER */ - 211, /* (79) replica ::= REPLICA INTEGER */ - 212, /* (80) quorum ::= QUORUM INTEGER */ - 213, /* (81) days ::= DAYS INTEGER */ - 214, /* (82) minrows ::= MINROWS INTEGER */ - 215, /* (83) maxrows ::= MAXROWS INTEGER */ - 216, /* (84) blocks ::= BLOCKS INTEGER */ - 217, /* (85) ctime ::= CTIME INTEGER */ - 218, /* (86) wal ::= WAL INTEGER */ - 219, /* (87) fsync ::= FSYNC INTEGER */ - 220, /* (88) comp ::= COMP INTEGER */ - 221, /* (89) prec ::= PRECISION STRING */ - 222, /* (90) update ::= UPDATE INTEGER */ - 223, /* (91) cachelast ::= CACHELAST INTEGER */ - 224, /* (92) partitions ::= PARTITIONS INTEGER */ - 197, /* (93) db_optr ::= */ - 197, /* (94) db_optr ::= db_optr cache */ - 197, /* (95) db_optr ::= db_optr replica */ - 197, /* (96) db_optr ::= db_optr quorum */ - 197, /* (97) db_optr ::= db_optr days */ - 197, /* (98) db_optr ::= db_optr minrows */ - 197, /* (99) db_optr ::= db_optr maxrows */ - 197, /* (100) db_optr ::= db_optr blocks */ - 197, /* (101) db_optr ::= db_optr ctime */ - 197, /* (102) db_optr ::= db_optr wal */ - 197, /* (103) db_optr ::= db_optr fsync */ - 197, /* (104) db_optr ::= db_optr comp */ - 197, /* (105) db_optr ::= db_optr prec */ - 197, /* (106) db_optr ::= db_optr keep */ - 197, /* (107) db_optr ::= db_optr update */ - 197, /* (108) db_optr ::= db_optr cachelast */ - 198, /* (109) topic_optr ::= db_optr */ - 198, /* (110) topic_optr ::= topic_optr partitions */ - 193, /* (111) alter_db_optr ::= */ - 193, /* (112) alter_db_optr ::= alter_db_optr replica */ - 193, /* (113) alter_db_optr ::= alter_db_optr quorum */ - 193, /* (114) alter_db_optr ::= alter_db_optr keep */ - 193, /* (115) alter_db_optr ::= alter_db_optr blocks */ - 193, /* (116) alter_db_optr ::= alter_db_optr comp */ - 193, /* (117) alter_db_optr ::= 
alter_db_optr wal */ - 193, /* (118) alter_db_optr ::= alter_db_optr fsync */ - 193, /* (119) alter_db_optr ::= alter_db_optr update */ - 193, /* (120) alter_db_optr ::= alter_db_optr cachelast */ - 194, /* (121) alter_topic_optr ::= alter_db_optr */ - 194, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ - 225, /* (123) typename ::= ids */ - 225, /* (124) typename ::= ids LP signed RP */ - 225, /* (125) typename ::= ids UNSIGNED */ - 226, /* (126) signed ::= INTEGER */ - 226, /* (127) signed ::= PLUS INTEGER */ - 226, /* (128) signed ::= MINUS INTEGER */ - 188, /* (129) cmd ::= CREATE TABLE create_table_args */ - 188, /* (130) cmd ::= CREATE TABLE create_stable_args */ - 188, /* (131) cmd ::= CREATE STABLE create_stable_args */ - 188, /* (132) cmd ::= CREATE TABLE create_table_list */ - 229, /* (133) create_table_list ::= create_from_stable */ - 229, /* (134) create_table_list ::= create_table_list create_from_stable */ - 227, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - 228, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - 230, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - 230, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - 232, /* (139) tagNamelist ::= tagNamelist COMMA ids */ - 232, /* (140) tagNamelist ::= ids */ - 227, /* (141) create_table_args ::= ifnotexists ids cpxName AS select */ - 231, /* (142) columnlist ::= columnlist COMMA column */ - 231, /* (143) columnlist ::= column */ - 234, /* (144) column ::= ids typename */ - 209, /* (145) tagitemlist ::= tagitemlist COMMA tagitem */ - 209, /* (146) tagitemlist ::= tagitem */ - 235, /* (147) tagitem ::= INTEGER */ - 235, /* (148) tagitem ::= FLOAT */ - 235, /* (149) tagitem ::= STRING */ - 235, /* (150) tagitem ::= BOOL */ - 235, /* (151) tagitem ::= NULL */ - 235, /* (152) tagitem ::= MINUS INTEGER */ - 235, /* (153) tagitem ::= MINUS FLOAT */ - 235, /* (154) tagitem ::= PLUS INTEGER */ - 235, /* (155) tagitem ::= PLUS FLOAT */ - 233, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - 233, /* (157) select ::= LP select RP */ - 248, /* (158) union ::= select */ - 248, /* (159) union ::= union UNION ALL select */ - 188, /* (160) cmd ::= union */ - 233, /* (161) select ::= SELECT selcollist */ - 249, /* (162) sclp ::= selcollist COMMA */ - 249, /* (163) sclp ::= */ - 236, /* (164) selcollist ::= sclp distinct expr as */ - 236, /* (165) selcollist ::= sclp STAR */ - 252, /* (166) as ::= AS ids */ - 252, /* (167) as ::= ids */ - 252, /* (168) as ::= */ - 250, /* (169) distinct ::= DISTINCT */ - 250, /* (170) distinct ::= */ - 237, /* (171) from ::= FROM tablelist */ - 237, /* (172) from ::= FROM LP union RP */ - 253, /* (173) tablelist ::= ids cpxName */ - 253, /* (174) tablelist ::= ids cpxName ids */ - 253, /* (175) tablelist ::= tablelist COMMA ids cpxName */ - 253, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ - 254, /* (177) tmvar ::= VARIABLE */ - 239, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ - 239, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - 239, /* (180) interval_opt ::= */ - 240, /* (181) session_option ::= */ - 240, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 241, /* (183) fill_opt ::= */ - 241, /* (184) fill_opt ::= FILL LP ID 
COMMA tagitemlist RP */ - 241, /* (185) fill_opt ::= FILL LP ID RP */ - 242, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ - 242, /* (187) sliding_opt ::= */ - 244, /* (188) orderby_opt ::= */ - 244, /* (189) orderby_opt ::= ORDER BY sortlist */ - 255, /* (190) sortlist ::= sortlist COMMA item sortorder */ - 255, /* (191) sortlist ::= item sortorder */ - 257, /* (192) item ::= ids cpxName */ - 258, /* (193) sortorder ::= ASC */ - 258, /* (194) sortorder ::= DESC */ - 258, /* (195) sortorder ::= */ - 243, /* (196) groupby_opt ::= */ - 243, /* (197) groupby_opt ::= GROUP BY grouplist */ - 259, /* (198) grouplist ::= grouplist COMMA item */ - 259, /* (199) grouplist ::= item */ - 245, /* (200) having_opt ::= */ - 245, /* (201) having_opt ::= HAVING expr */ - 247, /* (202) limit_opt ::= */ - 247, /* (203) limit_opt ::= LIMIT signed */ - 247, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ - 247, /* (205) limit_opt ::= LIMIT signed COMMA signed */ - 246, /* (206) slimit_opt ::= */ - 246, /* (207) slimit_opt ::= SLIMIT signed */ - 246, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ - 246, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ - 238, /* (210) where_opt ::= */ - 238, /* (211) where_opt ::= WHERE expr */ - 251, /* (212) expr ::= LP expr RP */ - 251, /* (213) expr ::= ID */ - 251, /* (214) expr ::= ID DOT ID */ - 251, /* (215) expr ::= ID DOT STAR */ - 251, /* (216) expr ::= INTEGER */ - 251, /* (217) expr ::= MINUS INTEGER */ - 251, /* (218) expr ::= PLUS INTEGER */ - 251, /* (219) expr ::= FLOAT */ - 251, /* (220) expr ::= MINUS FLOAT */ - 251, /* (221) expr ::= PLUS FLOAT */ - 251, /* (222) expr ::= STRING */ - 251, /* (223) expr ::= NOW */ - 251, /* (224) expr ::= VARIABLE */ - 251, /* (225) expr ::= BOOL */ - 251, /* (226) expr ::= NULL */ - 251, /* (227) expr ::= ID LP exprlist RP */ - 251, /* (228) expr ::= ID LP STAR RP */ - 251, /* (229) expr ::= expr IS NULL */ - 251, /* (230) expr ::= expr IS NOT NULL */ - 251, /* (231) expr ::= expr LT expr */ - 251, /* (232) expr ::= expr GT expr */ - 251, /* (233) expr ::= expr LE expr */ - 251, /* (234) expr ::= expr GE expr */ - 251, /* (235) expr ::= expr NE expr */ - 251, /* (236) expr ::= expr EQ expr */ - 251, /* (237) expr ::= expr BETWEEN expr AND expr */ - 251, /* (238) expr ::= expr AND expr */ - 251, /* (239) expr ::= expr OR expr */ - 251, /* (240) expr ::= expr PLUS expr */ - 251, /* (241) expr ::= expr MINUS expr */ - 251, /* (242) expr ::= expr STAR expr */ - 251, /* (243) expr ::= expr SLASH expr */ - 251, /* (244) expr ::= expr REM expr */ - 251, /* (245) expr ::= expr LIKE expr */ - 251, /* (246) expr ::= expr IN LP exprlist RP */ - 260, /* (247) exprlist ::= exprlist COMMA expritem */ - 260, /* (248) exprlist ::= expritem */ - 261, /* (249) expritem ::= expr */ - 261, /* (250) expritem ::= */ - 188, /* (251) cmd ::= RESET QUERY CACHE */ - 188, /* (252) cmd ::= SYNCDB ids REPLICA */ - 188, /* (253) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - 188, /* (254) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - 188, /* (255) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - 188, /* (256) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - 188, /* (257) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - 188, /* (258) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 188, /* (259) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - 188, /* (260) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 188, /* (261) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 
188, /* (262) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 188, /* (263) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 188, /* (264) cmd ::= KILL CONNECTION INTEGER */ - 188, /* (265) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 188, /* (266) cmd ::= KILL QUERY INTEGER COLON INTEGER */ -}; - -/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number -** of symbols on the right-hand side of that rule. */ -static const signed char yyRuleInfoNRhs[] = { - -1, /* (0) program ::= cmd */ - -2, /* (1) cmd ::= SHOW DATABASES */ - -2, /* (2) cmd ::= SHOW TOPICS */ - -2, /* (3) cmd ::= SHOW MNODES */ - -2, /* (4) cmd ::= SHOW DNODES */ - -2, /* (5) cmd ::= SHOW ACCOUNTS */ - -2, /* (6) cmd ::= SHOW USERS */ - -2, /* (7) cmd ::= SHOW MODULES */ - -2, /* (8) cmd ::= SHOW QUERIES */ - -2, /* (9) cmd ::= SHOW CONNECTIONS */ - -2, /* (10) cmd ::= SHOW STREAMS */ - -2, /* (11) cmd ::= SHOW VARIABLES */ - -2, /* (12) cmd ::= SHOW SCORES */ - -2, /* (13) cmd ::= SHOW GRANTS */ - -2, /* (14) cmd ::= SHOW VNODES */ - -3, /* (15) cmd ::= SHOW VNODES IPTOKEN */ - 0, /* (16) dbPrefix ::= */ - -2, /* (17) dbPrefix ::= ids DOT */ - 0, /* (18) cpxName ::= */ - -2, /* (19) cpxName ::= DOT ids */ - -5, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - -4, /* (21) cmd ::= SHOW CREATE DATABASE ids */ - -3, /* (22) cmd ::= SHOW dbPrefix TABLES */ - -5, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - -3, /* (24) cmd ::= SHOW dbPrefix STABLES */ - -5, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - -3, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ - -4, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ - -5, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ - -5, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ - -4, /* (30) cmd ::= DROP DATABASE ifexists ids */ - -4, /* (31) cmd ::= DROP TOPIC ifexists ids */ - -3, /* (32) cmd ::= DROP DNODE ids */ - -3, /* (33) cmd ::= DROP USER ids */ - -3, /* (34) cmd ::= DROP ACCOUNT ids */ - -2, /* (35) cmd ::= USE ids */ - -3, /* (36) cmd ::= DESCRIBE ids cpxName */ - -5, /* (37) cmd ::= ALTER USER ids PASS ids */ - -5, /* (38) cmd ::= ALTER USER ids PRIVILEGE ids */ - -4, /* (39) cmd ::= ALTER DNODE ids ids */ - -5, /* (40) cmd ::= ALTER DNODE ids ids ids */ - -3, /* (41) cmd ::= ALTER LOCAL ids */ - -4, /* (42) cmd ::= ALTER LOCAL ids ids */ - -4, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ - -4, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ - -4, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ - -6, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - -1, /* (47) ids ::= ID */ - -1, /* (48) ids ::= STRING */ - -2, /* (49) ifexists ::= IF EXISTS */ - 0, /* (50) ifexists ::= */ - -3, /* (51) ifnotexists ::= IF NOT EXISTS */ - 0, /* (52) ifnotexists ::= */ - -3, /* (53) cmd ::= CREATE DNODE ids */ - -6, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - -5, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - -5, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - -5, /* (57) cmd ::= CREATE USER ids PASS ids */ - 0, /* (58) pps ::= */ - -2, /* (59) pps ::= PPS INTEGER */ - 0, /* (60) tseries ::= */ - -2, /* (61) tseries ::= TSERIES INTEGER */ - 0, /* (62) dbs ::= */ - -2, /* (63) dbs ::= DBS INTEGER */ - 0, /* (64) streams ::= */ - -2, /* (65) streams ::= STREAMS INTEGER */ - 0, /* (66) storage ::= */ - -2, /* (67) storage ::= STORAGE INTEGER */ - 0, /* (68) qtime ::= */ - -2, /* (69) qtime ::= QTIME INTEGER */ - 0, /* (70) users ::= */ - -2, /* (71) users ::= USERS INTEGER */ - 0, /* (72) conns ::= */ - -2, 
/* (73) conns ::= CONNS INTEGER */ - 0, /* (74) state ::= */ - -2, /* (75) state ::= STATE ids */ - -9, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - -2, /* (77) keep ::= KEEP tagitemlist */ - -2, /* (78) cache ::= CACHE INTEGER */ - -2, /* (79) replica ::= REPLICA INTEGER */ - -2, /* (80) quorum ::= QUORUM INTEGER */ - -2, /* (81) days ::= DAYS INTEGER */ - -2, /* (82) minrows ::= MINROWS INTEGER */ - -2, /* (83) maxrows ::= MAXROWS INTEGER */ - -2, /* (84) blocks ::= BLOCKS INTEGER */ - -2, /* (85) ctime ::= CTIME INTEGER */ - -2, /* (86) wal ::= WAL INTEGER */ - -2, /* (87) fsync ::= FSYNC INTEGER */ - -2, /* (88) comp ::= COMP INTEGER */ - -2, /* (89) prec ::= PRECISION STRING */ - -2, /* (90) update ::= UPDATE INTEGER */ - -2, /* (91) cachelast ::= CACHELAST INTEGER */ - -2, /* (92) partitions ::= PARTITIONS INTEGER */ - 0, /* (93) db_optr ::= */ - -2, /* (94) db_optr ::= db_optr cache */ - -2, /* (95) db_optr ::= db_optr replica */ - -2, /* (96) db_optr ::= db_optr quorum */ - -2, /* (97) db_optr ::= db_optr days */ - -2, /* (98) db_optr ::= db_optr minrows */ - -2, /* (99) db_optr ::= db_optr maxrows */ - -2, /* (100) db_optr ::= db_optr blocks */ - -2, /* (101) db_optr ::= db_optr ctime */ - -2, /* (102) db_optr ::= db_optr wal */ - -2, /* (103) db_optr ::= db_optr fsync */ - -2, /* (104) db_optr ::= db_optr comp */ - -2, /* (105) db_optr ::= db_optr prec */ - -2, /* (106) db_optr ::= db_optr keep */ - -2, /* (107) db_optr ::= db_optr update */ - -2, /* (108) db_optr ::= db_optr cachelast */ - -1, /* (109) topic_optr ::= db_optr */ - -2, /* (110) topic_optr ::= topic_optr partitions */ - 0, /* (111) alter_db_optr ::= */ - -2, /* (112) alter_db_optr ::= alter_db_optr replica */ - -2, /* (113) alter_db_optr ::= alter_db_optr quorum */ - -2, /* (114) alter_db_optr ::= alter_db_optr keep */ - -2, /* (115) alter_db_optr ::= alter_db_optr blocks */ - -2, /* (116) alter_db_optr ::= alter_db_optr comp */ - -2, /* (117) alter_db_optr ::= alter_db_optr wal */ - -2, /* (118) alter_db_optr ::= alter_db_optr fsync */ - -2, /* (119) alter_db_optr ::= alter_db_optr update */ - -2, /* (120) alter_db_optr ::= alter_db_optr cachelast */ - -1, /* (121) alter_topic_optr ::= alter_db_optr */ - -2, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ - -1, /* (123) typename ::= ids */ - -4, /* (124) typename ::= ids LP signed RP */ - -2, /* (125) typename ::= ids UNSIGNED */ - -1, /* (126) signed ::= INTEGER */ - -2, /* (127) signed ::= PLUS INTEGER */ - -2, /* (128) signed ::= MINUS INTEGER */ - -3, /* (129) cmd ::= CREATE TABLE create_table_args */ - -3, /* (130) cmd ::= CREATE TABLE create_stable_args */ - -3, /* (131) cmd ::= CREATE STABLE create_stable_args */ - -3, /* (132) cmd ::= CREATE TABLE create_table_list */ - -1, /* (133) create_table_list ::= create_from_stable */ - -2, /* (134) create_table_list ::= create_table_list create_from_stable */ - -6, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - -10, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - -10, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - -13, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - -3, /* (139) tagNamelist ::= tagNamelist COMMA ids */ - -1, /* (140) tagNamelist ::= ids */ - -5, /* (141) create_table_args ::= ifnotexists ids cpxName AS select */ - -3, /* (142) columnlist ::= 
columnlist COMMA column */ - -1, /* (143) columnlist ::= column */ - -2, /* (144) column ::= ids typename */ - -3, /* (145) tagitemlist ::= tagitemlist COMMA tagitem */ - -1, /* (146) tagitemlist ::= tagitem */ - -1, /* (147) tagitem ::= INTEGER */ - -1, /* (148) tagitem ::= FLOAT */ - -1, /* (149) tagitem ::= STRING */ - -1, /* (150) tagitem ::= BOOL */ - -1, /* (151) tagitem ::= NULL */ - -2, /* (152) tagitem ::= MINUS INTEGER */ - -2, /* (153) tagitem ::= MINUS FLOAT */ - -2, /* (154) tagitem ::= PLUS INTEGER */ - -2, /* (155) tagitem ::= PLUS FLOAT */ - -13, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - -3, /* (157) select ::= LP select RP */ - -1, /* (158) union ::= select */ - -4, /* (159) union ::= union UNION ALL select */ - -1, /* (160) cmd ::= union */ - -2, /* (161) select ::= SELECT selcollist */ - -2, /* (162) sclp ::= selcollist COMMA */ - 0, /* (163) sclp ::= */ - -4, /* (164) selcollist ::= sclp distinct expr as */ - -2, /* (165) selcollist ::= sclp STAR */ - -2, /* (166) as ::= AS ids */ - -1, /* (167) as ::= ids */ - 0, /* (168) as ::= */ - -1, /* (169) distinct ::= DISTINCT */ - 0, /* (170) distinct ::= */ - -2, /* (171) from ::= FROM tablelist */ - -4, /* (172) from ::= FROM LP union RP */ - -2, /* (173) tablelist ::= ids cpxName */ - -3, /* (174) tablelist ::= ids cpxName ids */ - -4, /* (175) tablelist ::= tablelist COMMA ids cpxName */ - -5, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ - -1, /* (177) tmvar ::= VARIABLE */ - -4, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ - -6, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - 0, /* (180) interval_opt ::= */ - 0, /* (181) session_option ::= */ - -7, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 0, /* (183) fill_opt ::= */ - -6, /* (184) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - -4, /* (185) fill_opt ::= FILL LP ID RP */ - -4, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ - 0, /* (187) sliding_opt ::= */ - 0, /* (188) orderby_opt ::= */ - -3, /* (189) orderby_opt ::= ORDER BY sortlist */ - -4, /* (190) sortlist ::= sortlist COMMA item sortorder */ - -2, /* (191) sortlist ::= item sortorder */ - -2, /* (192) item ::= ids cpxName */ - -1, /* (193) sortorder ::= ASC */ - -1, /* (194) sortorder ::= DESC */ - 0, /* (195) sortorder ::= */ - 0, /* (196) groupby_opt ::= */ - -3, /* (197) groupby_opt ::= GROUP BY grouplist */ - -3, /* (198) grouplist ::= grouplist COMMA item */ - -1, /* (199) grouplist ::= item */ - 0, /* (200) having_opt ::= */ - -2, /* (201) having_opt ::= HAVING expr */ - 0, /* (202) limit_opt ::= */ - -2, /* (203) limit_opt ::= LIMIT signed */ - -4, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ - -4, /* (205) limit_opt ::= LIMIT signed COMMA signed */ - 0, /* (206) slimit_opt ::= */ - -2, /* (207) slimit_opt ::= SLIMIT signed */ - -4, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ - -4, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ - 0, /* (210) where_opt ::= */ - -2, /* (211) where_opt ::= WHERE expr */ - -3, /* (212) expr ::= LP expr RP */ - -1, /* (213) expr ::= ID */ - -3, /* (214) expr ::= ID DOT ID */ - -3, /* (215) expr ::= ID DOT STAR */ - -1, /* (216) expr ::= INTEGER */ - -2, /* (217) expr ::= MINUS INTEGER */ - -2, /* (218) expr ::= PLUS INTEGER */ - -1, /* (219) expr ::= FLOAT */ - -2, /* (220) expr ::= MINUS FLOAT */ - -2, /* (221) expr ::= PLUS FLOAT */ - -1, /* (222) expr ::= STRING */ - 
-1, /* (223) expr ::= NOW */ - -1, /* (224) expr ::= VARIABLE */ - -1, /* (225) expr ::= BOOL */ - -1, /* (226) expr ::= NULL */ - -4, /* (227) expr ::= ID LP exprlist RP */ - -4, /* (228) expr ::= ID LP STAR RP */ - -3, /* (229) expr ::= expr IS NULL */ - -4, /* (230) expr ::= expr IS NOT NULL */ - -3, /* (231) expr ::= expr LT expr */ - -3, /* (232) expr ::= expr GT expr */ - -3, /* (233) expr ::= expr LE expr */ - -3, /* (234) expr ::= expr GE expr */ - -3, /* (235) expr ::= expr NE expr */ - -3, /* (236) expr ::= expr EQ expr */ - -5, /* (237) expr ::= expr BETWEEN expr AND expr */ - -3, /* (238) expr ::= expr AND expr */ - -3, /* (239) expr ::= expr OR expr */ - -3, /* (240) expr ::= expr PLUS expr */ - -3, /* (241) expr ::= expr MINUS expr */ - -3, /* (242) expr ::= expr STAR expr */ - -3, /* (243) expr ::= expr SLASH expr */ - -3, /* (244) expr ::= expr REM expr */ - -3, /* (245) expr ::= expr LIKE expr */ - -5, /* (246) expr ::= expr IN LP exprlist RP */ - -3, /* (247) exprlist ::= exprlist COMMA expritem */ - -1, /* (248) exprlist ::= expritem */ - -1, /* (249) expritem ::= expr */ - 0, /* (250) expritem ::= */ - -3, /* (251) cmd ::= RESET QUERY CACHE */ - -3, /* (252) cmd ::= SYNCDB ids REPLICA */ - -7, /* (253) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (254) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - -7, /* (255) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - -7, /* (256) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - -8, /* (257) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (258) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (259) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (260) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (261) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (262) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (263) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -3, /* (264) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (265) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (266) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +/* The following table contains information about every rule that +** is used during the reduce. 
+*/ +static const struct { + YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ + signed char nrhs; /* Negative of the number of RHS symbols in the rule */ +} yyRuleInfo[] = { + { 188, -1 }, /* (0) program ::= cmd */ + { 189, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 189, -2 }, /* (2) cmd ::= SHOW TOPICS */ + { 189, -2 }, /* (3) cmd ::= SHOW MNODES */ + { 189, -2 }, /* (4) cmd ::= SHOW DNODES */ + { 189, -2 }, /* (5) cmd ::= SHOW ACCOUNTS */ + { 189, -2 }, /* (6) cmd ::= SHOW USERS */ + { 189, -2 }, /* (7) cmd ::= SHOW MODULES */ + { 189, -2 }, /* (8) cmd ::= SHOW QUERIES */ + { 189, -2 }, /* (9) cmd ::= SHOW CONNECTIONS */ + { 189, -2 }, /* (10) cmd ::= SHOW STREAMS */ + { 189, -2 }, /* (11) cmd ::= SHOW VARIABLES */ + { 189, -2 }, /* (12) cmd ::= SHOW SCORES */ + { 189, -2 }, /* (13) cmd ::= SHOW GRANTS */ + { 189, -2 }, /* (14) cmd ::= SHOW VNODES */ + { 189, -3 }, /* (15) cmd ::= SHOW VNODES IPTOKEN */ + { 190, 0 }, /* (16) dbPrefix ::= */ + { 190, -2 }, /* (17) dbPrefix ::= ids DOT */ + { 192, 0 }, /* (18) cpxName ::= */ + { 192, -2 }, /* (19) cpxName ::= DOT ids */ + { 189, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ + { 189, -4 }, /* (21) cmd ::= SHOW CREATE DATABASE ids */ + { 189, -3 }, /* (22) cmd ::= SHOW dbPrefix TABLES */ + { 189, -5 }, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 189, -3 }, /* (24) cmd ::= SHOW dbPrefix STABLES */ + { 189, -5 }, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 189, -3 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ + { 189, -4 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 189, -5 }, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ + { 189, -5 }, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ + { 189, -4 }, /* (30) cmd ::= DROP DATABASE ifexists ids */ + { 189, -4 }, /* (31) cmd ::= DROP TOPIC ifexists ids */ + { 189, -3 }, /* (32) cmd ::= DROP DNODE ids */ + { 189, -3 }, /* (33) cmd ::= DROP USER ids */ + { 189, -3 }, /* (34) cmd ::= DROP ACCOUNT ids */ + { 189, -2 }, /* (35) cmd ::= USE ids */ + { 189, -3 }, /* (36) cmd ::= DESCRIBE ids cpxName */ + { 189, -5 }, /* (37) cmd ::= ALTER USER ids PASS ids */ + { 189, -5 }, /* (38) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 189, -4 }, /* (39) cmd ::= ALTER DNODE ids ids */ + { 189, -5 }, /* (40) cmd ::= ALTER DNODE ids ids ids */ + { 189, -3 }, /* (41) cmd ::= ALTER LOCAL ids */ + { 189, -4 }, /* (42) cmd ::= ALTER LOCAL ids ids */ + { 189, -4 }, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 189, -4 }, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ + { 189, -4 }, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 189, -6 }, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 191, -1 }, /* (47) ids ::= ID */ + { 191, -1 }, /* (48) ids ::= STRING */ + { 193, -2 }, /* (49) ifexists ::= IF EXISTS */ + { 193, 0 }, /* (50) ifexists ::= */ + { 197, -3 }, /* (51) ifnotexists ::= IF NOT EXISTS */ + { 197, 0 }, /* (52) ifnotexists ::= */ + { 189, -3 }, /* (53) cmd ::= CREATE DNODE ids */ + { 189, -6 }, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 189, -5 }, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 189, -5 }, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + { 189, -5 }, /* (57) cmd ::= CREATE USER ids PASS ids */ + { 200, 0 }, /* (58) pps ::= */ + { 200, -2 }, /* (59) pps ::= PPS INTEGER */ + { 201, 0 }, /* (60) tseries ::= */ + { 201, -2 }, /* (61) tseries ::= TSERIES INTEGER */ + { 202, 0 }, /* (62) dbs ::= */ + { 202, -2 }, /* (63) dbs ::= DBS INTEGER */ + { 203, 0 }, /* (64) streams 
::= */ + { 203, -2 }, /* (65) streams ::= STREAMS INTEGER */ + { 204, 0 }, /* (66) storage ::= */ + { 204, -2 }, /* (67) storage ::= STORAGE INTEGER */ + { 205, 0 }, /* (68) qtime ::= */ + { 205, -2 }, /* (69) qtime ::= QTIME INTEGER */ + { 206, 0 }, /* (70) users ::= */ + { 206, -2 }, /* (71) users ::= USERS INTEGER */ + { 207, 0 }, /* (72) conns ::= */ + { 207, -2 }, /* (73) conns ::= CONNS INTEGER */ + { 208, 0 }, /* (74) state ::= */ + { 208, -2 }, /* (75) state ::= STATE ids */ + { 196, -9 }, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 209, -2 }, /* (77) keep ::= KEEP tagitemlist */ + { 211, -2 }, /* (78) cache ::= CACHE INTEGER */ + { 212, -2 }, /* (79) replica ::= REPLICA INTEGER */ + { 213, -2 }, /* (80) quorum ::= QUORUM INTEGER */ + { 214, -2 }, /* (81) days ::= DAYS INTEGER */ + { 215, -2 }, /* (82) minrows ::= MINROWS INTEGER */ + { 216, -2 }, /* (83) maxrows ::= MAXROWS INTEGER */ + { 217, -2 }, /* (84) blocks ::= BLOCKS INTEGER */ + { 218, -2 }, /* (85) ctime ::= CTIME INTEGER */ + { 219, -2 }, /* (86) wal ::= WAL INTEGER */ + { 220, -2 }, /* (87) fsync ::= FSYNC INTEGER */ + { 221, -2 }, /* (88) comp ::= COMP INTEGER */ + { 222, -2 }, /* (89) prec ::= PRECISION STRING */ + { 223, -2 }, /* (90) update ::= UPDATE INTEGER */ + { 224, -2 }, /* (91) cachelast ::= CACHELAST INTEGER */ + { 225, -2 }, /* (92) partitions ::= PARTITIONS INTEGER */ + { 198, 0 }, /* (93) db_optr ::= */ + { 198, -2 }, /* (94) db_optr ::= db_optr cache */ + { 198, -2 }, /* (95) db_optr ::= db_optr replica */ + { 198, -2 }, /* (96) db_optr ::= db_optr quorum */ + { 198, -2 }, /* (97) db_optr ::= db_optr days */ + { 198, -2 }, /* (98) db_optr ::= db_optr minrows */ + { 198, -2 }, /* (99) db_optr ::= db_optr maxrows */ + { 198, -2 }, /* (100) db_optr ::= db_optr blocks */ + { 198, -2 }, /* (101) db_optr ::= db_optr ctime */ + { 198, -2 }, /* (102) db_optr ::= db_optr wal */ + { 198, -2 }, /* (103) db_optr ::= db_optr fsync */ + { 198, -2 }, /* (104) db_optr ::= db_optr comp */ + { 198, -2 }, /* (105) db_optr ::= db_optr prec */ + { 198, -2 }, /* (106) db_optr ::= db_optr keep */ + { 198, -2 }, /* (107) db_optr ::= db_optr update */ + { 198, -2 }, /* (108) db_optr ::= db_optr cachelast */ + { 199, -1 }, /* (109) topic_optr ::= db_optr */ + { 199, -2 }, /* (110) topic_optr ::= topic_optr partitions */ + { 194, 0 }, /* (111) alter_db_optr ::= */ + { 194, -2 }, /* (112) alter_db_optr ::= alter_db_optr replica */ + { 194, -2 }, /* (113) alter_db_optr ::= alter_db_optr quorum */ + { 194, -2 }, /* (114) alter_db_optr ::= alter_db_optr keep */ + { 194, -2 }, /* (115) alter_db_optr ::= alter_db_optr blocks */ + { 194, -2 }, /* (116) alter_db_optr ::= alter_db_optr comp */ + { 194, -2 }, /* (117) alter_db_optr ::= alter_db_optr wal */ + { 194, -2 }, /* (118) alter_db_optr ::= alter_db_optr fsync */ + { 194, -2 }, /* (119) alter_db_optr ::= alter_db_optr update */ + { 194, -2 }, /* (120) alter_db_optr ::= alter_db_optr cachelast */ + { 195, -1 }, /* (121) alter_topic_optr ::= alter_db_optr */ + { 195, -2 }, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ + { 226, -1 }, /* (123) typename ::= ids */ + { 226, -4 }, /* (124) typename ::= ids LP signed RP */ + { 226, -2 }, /* (125) typename ::= ids UNSIGNED */ + { 227, -1 }, /* (126) signed ::= INTEGER */ + { 227, -2 }, /* (127) signed ::= PLUS INTEGER */ + { 227, -2 }, /* (128) signed ::= MINUS INTEGER */ + { 189, -3 }, /* (129) cmd ::= CREATE TABLE create_table_args */ + { 189, -3 }, /* (130) cmd ::= CREATE TABLE 
create_stable_args */ + { 189, -3 }, /* (131) cmd ::= CREATE STABLE create_stable_args */ + { 189, -3 }, /* (132) cmd ::= CREATE TABLE create_table_list */ + { 230, -1 }, /* (133) create_table_list ::= create_from_stable */ + { 230, -2 }, /* (134) create_table_list ::= create_table_list create_from_stable */ + { 228, -6 }, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + { 229, -10 }, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + { 231, -10 }, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + { 231, -13 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + { 233, -3 }, /* (139) tagNamelist ::= tagNamelist COMMA ids */ + { 233, -1 }, /* (140) tagNamelist ::= ids */ + { 228, -5 }, /* (141) create_table_args ::= ifnotexists ids cpxName AS select */ + { 232, -3 }, /* (142) columnlist ::= columnlist COMMA column */ + { 232, -1 }, /* (143) columnlist ::= column */ + { 235, -2 }, /* (144) column ::= ids typename */ + { 210, -3 }, /* (145) tagitemlist ::= tagitemlist COMMA tagitem */ + { 210, -1 }, /* (146) tagitemlist ::= tagitem */ + { 236, -1 }, /* (147) tagitem ::= INTEGER */ + { 236, -1 }, /* (148) tagitem ::= FLOAT */ + { 236, -1 }, /* (149) tagitem ::= STRING */ + { 236, -1 }, /* (150) tagitem ::= BOOL */ + { 236, -1 }, /* (151) tagitem ::= NULL */ + { 236, -2 }, /* (152) tagitem ::= MINUS INTEGER */ + { 236, -2 }, /* (153) tagitem ::= MINUS FLOAT */ + { 236, -2 }, /* (154) tagitem ::= PLUS INTEGER */ + { 236, -2 }, /* (155) tagitem ::= PLUS FLOAT */ + { 234, -13 }, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 234, -3 }, /* (157) select ::= LP select RP */ + { 249, -1 }, /* (158) union ::= select */ + { 249, -4 }, /* (159) union ::= union UNION ALL select */ + { 189, -1 }, /* (160) cmd ::= union */ + { 234, -2 }, /* (161) select ::= SELECT selcollist */ + { 250, -2 }, /* (162) sclp ::= selcollist COMMA */ + { 250, 0 }, /* (163) sclp ::= */ + { 237, -4 }, /* (164) selcollist ::= sclp distinct expr as */ + { 237, -2 }, /* (165) selcollist ::= sclp STAR */ + { 253, -2 }, /* (166) as ::= AS ids */ + { 253, -1 }, /* (167) as ::= ids */ + { 253, 0 }, /* (168) as ::= */ + { 251, -1 }, /* (169) distinct ::= DISTINCT */ + { 251, 0 }, /* (170) distinct ::= */ + { 238, -2 }, /* (171) from ::= FROM tablelist */ + { 238, -4 }, /* (172) from ::= FROM LP union RP */ + { 254, -2 }, /* (173) tablelist ::= ids cpxName */ + { 254, -3 }, /* (174) tablelist ::= ids cpxName ids */ + { 254, -4 }, /* (175) tablelist ::= tablelist COMMA ids cpxName */ + { 254, -5 }, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ + { 255, -1 }, /* (177) tmvar ::= VARIABLE */ + { 240, -4 }, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ + { 240, -6 }, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 240, 0 }, /* (180) interval_opt ::= */ + { 241, 0 }, /* (181) session_option ::= */ + { 241, -7 }, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + { 242, 0 }, /* (183) fill_opt ::= */ + { 242, -6 }, /* (184) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 242, -4 }, /* (185) fill_opt ::= FILL LP ID RP */ + { 243, -4 }, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ + { 243, 0 }, /* (187) sliding_opt ::= */ + { 245, 0 }, /* (188) orderby_opt ::= */ + { 245, -3 }, 
/* (189) orderby_opt ::= ORDER BY sortlist */ + { 256, -4 }, /* (190) sortlist ::= sortlist COMMA item sortorder */ + { 256, -2 }, /* (191) sortlist ::= item sortorder */ + { 258, -2 }, /* (192) item ::= ids cpxName */ + { 259, -1 }, /* (193) sortorder ::= ASC */ + { 259, -1 }, /* (194) sortorder ::= DESC */ + { 259, 0 }, /* (195) sortorder ::= */ + { 244, 0 }, /* (196) groupby_opt ::= */ + { 244, -3 }, /* (197) groupby_opt ::= GROUP BY grouplist */ + { 260, -3 }, /* (198) grouplist ::= grouplist COMMA item */ + { 260, -1 }, /* (199) grouplist ::= item */ + { 246, 0 }, /* (200) having_opt ::= */ + { 246, -2 }, /* (201) having_opt ::= HAVING expr */ + { 248, 0 }, /* (202) limit_opt ::= */ + { 248, -2 }, /* (203) limit_opt ::= LIMIT signed */ + { 248, -4 }, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ + { 248, -4 }, /* (205) limit_opt ::= LIMIT signed COMMA signed */ + { 247, 0 }, /* (206) slimit_opt ::= */ + { 247, -2 }, /* (207) slimit_opt ::= SLIMIT signed */ + { 247, -4 }, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 247, -4 }, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ + { 239, 0 }, /* (210) where_opt ::= */ + { 239, -2 }, /* (211) where_opt ::= WHERE expr */ + { 252, -3 }, /* (212) expr ::= LP expr RP */ + { 252, -1 }, /* (213) expr ::= ID */ + { 252, -3 }, /* (214) expr ::= ID DOT ID */ + { 252, -3 }, /* (215) expr ::= ID DOT STAR */ + { 252, -1 }, /* (216) expr ::= INTEGER */ + { 252, -2 }, /* (217) expr ::= MINUS INTEGER */ + { 252, -2 }, /* (218) expr ::= PLUS INTEGER */ + { 252, -1 }, /* (219) expr ::= FLOAT */ + { 252, -2 }, /* (220) expr ::= MINUS FLOAT */ + { 252, -2 }, /* (221) expr ::= PLUS FLOAT */ + { 252, -1 }, /* (222) expr ::= STRING */ + { 252, -1 }, /* (223) expr ::= NOW */ + { 252, -1 }, /* (224) expr ::= VARIABLE */ + { 252, -2 }, /* (225) expr ::= PLUS VARIABLE */ + { 252, -2 }, /* (226) expr ::= MINUS VARIABLE */ + { 252, -1 }, /* (227) expr ::= BOOL */ + { 252, -1 }, /* (228) expr ::= NULL */ + { 252, -4 }, /* (229) expr ::= ID LP exprlist RP */ + { 252, -4 }, /* (230) expr ::= ID LP STAR RP */ + { 252, -3 }, /* (231) expr ::= expr IS NULL */ + { 252, -4 }, /* (232) expr ::= expr IS NOT NULL */ + { 252, -3 }, /* (233) expr ::= expr LT expr */ + { 252, -3 }, /* (234) expr ::= expr GT expr */ + { 252, -3 }, /* (235) expr ::= expr LE expr */ + { 252, -3 }, /* (236) expr ::= expr GE expr */ + { 252, -3 }, /* (237) expr ::= expr NE expr */ + { 252, -3 }, /* (238) expr ::= expr EQ expr */ + { 252, -5 }, /* (239) expr ::= expr BETWEEN expr AND expr */ + { 252, -3 }, /* (240) expr ::= expr AND expr */ + { 252, -3 }, /* (241) expr ::= expr OR expr */ + { 252, -3 }, /* (242) expr ::= expr PLUS expr */ + { 252, -3 }, /* (243) expr ::= expr MINUS expr */ + { 252, -3 }, /* (244) expr ::= expr STAR expr */ + { 252, -3 }, /* (245) expr ::= expr SLASH expr */ + { 252, -3 }, /* (246) expr ::= expr REM expr */ + { 252, -3 }, /* (247) expr ::= expr LIKE expr */ + { 252, -5 }, /* (248) expr ::= expr IN LP exprlist RP */ + { 261, -3 }, /* (249) exprlist ::= exprlist COMMA expritem */ + { 261, -1 }, /* (250) exprlist ::= expritem */ + { 262, -1 }, /* (251) expritem ::= expr */ + { 262, 0 }, /* (252) expritem ::= */ + { 189, -3 }, /* (253) cmd ::= RESET QUERY CACHE */ + { 189, -3 }, /* (254) cmd ::= SYNCDB ids REPLICA */ + { 189, -7 }, /* (255) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (256) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (257) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 
{ 189, -7 }, /* (258) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (259) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 189, -9 }, /* (260) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 189, -7 }, /* (261) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (262) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (263) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + { 189, -7 }, /* (264) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (265) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + { 189, -3 }, /* (266) cmd ::= KILL CONNECTION INTEGER */ + { 189, -5 }, /* (267) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 189, -5 }, /* (268) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2310,34 +2036,30 @@ static void yy_accept(yyParser*); /* Forward Declaration */ ** only called from one place, optimizing compilers will in-line it, which ** means that the extra parameters have no performance impact. */ -static YYACTIONTYPE yy_reduce( +static void yy_reduce( yyParser *yypParser, /* The parser */ unsigned int yyruleno, /* Number of the rule by which to reduce */ int yyLookahead, /* Lookahead token, or YYNOCODE if none */ ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ - ParseCTX_PDECL /* %extra_context */ ){ int yygoto; /* The next state */ - YYACTIONTYPE yyact; /* The next action */ + int yyact; /* The next action */ yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ - ParseARG_FETCH + ParseARG_FETCH; (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; #ifndef NDEBUG if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfoNRhs[yyruleno]; + yysize = yyRuleInfo[yyruleno].nrhs; if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n", yyTracePrompt, - yyruleno, yyRuleName[yyruleno], - yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ yypParser->yyhwm++; @@ -2355,19 +2077,13 @@ static YYACTIONTYPE yy_reduce( #if YYSTACKDEPTH>0 if( yypParser->yytos>=yypParser->yystackEnd ){ yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; + return; } #else if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. 
*/ - return 0; + return; } yymsp = yypParser->yytos; } @@ -2552,13 +2268,13 @@ static YYACTIONTYPE yy_reduce( break; case 43: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 44: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==44); -{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy94, &t);} +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &t);} break; case 45: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy351);} break; case 46: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy351);} break; case 47: /* ids ::= ID */ case 48: /* ids ::= STRING */ yytestcase(yyruleno==48); @@ -2580,11 +2296,11 @@ static YYACTIONTYPE yy_reduce( { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 54: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy351);} break; case 55: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 56: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==56); -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy94, &yymsp[-2].minor.yy0);} +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &yymsp[-2].minor.yy0);} break; case 57: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} @@ -2613,20 +2329,20 @@ static YYACTIONTYPE yy_reduce( break; case 76: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy419.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy419.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy419.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy419.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy419.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy419.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy419.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy419.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy419.stat = yymsp[0].minor.yy0; -} - yymsp[-8].minor.yy419 = yylhsminor.yy419; + yylhsminor.yy351.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy351.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy351.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy351.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy351.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + 
yylhsminor.yy351.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy351.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy351.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy351.stat = yymsp[0].minor.yy0; +} + yymsp[-8].minor.yy351 = yylhsminor.yy351; break; case 77: /* keep ::= KEEP tagitemlist */ -{ yymsp[-1].minor.yy429 = yymsp[0].minor.yy429; } +{ yymsp[-1].minor.yy159 = yymsp[0].minor.yy159; } break; case 78: /* cache ::= CACHE INTEGER */ case 79: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==79); @@ -2646,234 +2362,234 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 93: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy94); yymsp[1].minor.yy94.dbType = TSDB_DB_TYPE_DEFAULT;} +{setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 94: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 95: /* db_optr ::= db_optr replica */ case 112: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==112); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 96: /* db_optr ::= db_optr quorum */ case 113: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==113); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 97: /* db_optr ::= db_optr days */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 98: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 99: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 100: /* db_optr ::= db_optr blocks */ case 115: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==115); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; 
yylhsminor.yy94.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 101: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 102: /* db_optr ::= db_optr wal */ case 117: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==117); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 103: /* db_optr ::= db_optr fsync */ case 118: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==118); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 104: /* db_optr ::= db_optr comp */ case 116: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==116); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 105: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 106: /* db_optr ::= db_optr keep */ case 114: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==114); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.keep = yymsp[0].minor.yy429; } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.keep = yymsp[0].minor.yy159; } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 107: /* db_optr ::= db_optr update */ case 119: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==119); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 108: /* db_optr ::= db_optr cachelast */ case 120: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==120); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); 
} + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 109: /* topic_optr ::= db_optr */ case 121: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==121); -{ yylhsminor.yy94 = yymsp[0].minor.yy94; yylhsminor.yy94.dbType = TSDB_DB_TYPE_TOPIC; } - yymsp[0].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[0].minor.yy322; yylhsminor.yy322.dbType = TSDB_DB_TYPE_TOPIC; } + yymsp[0].minor.yy322 = yylhsminor.yy322; break; case 110: /* topic_optr ::= topic_optr partitions */ case 122: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==122); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 111: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy94); yymsp[1].minor.yy94.dbType = TSDB_DB_TYPE_DEFAULT;} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 123: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSetColumnType (&yylhsminor.yy451, &yymsp[0].minor.yy0); + tSetColumnType (&yylhsminor.yy407, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy451 = yylhsminor.yy451; + yymsp[0].minor.yy407 = yylhsminor.yy407; break; case 124: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy481 <= 0) { + if (yymsp[-1].minor.yy317 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSetColumnType(&yylhsminor.yy451, &yymsp[-3].minor.yy0); + tSetColumnType(&yylhsminor.yy407, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy481; // negative value of name length - tSetColumnType(&yylhsminor.yy451, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy317; // negative value of name length + tSetColumnType(&yylhsminor.yy407, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy451 = yylhsminor.yy451; + yymsp[-3].minor.yy407 = yylhsminor.yy407; break; case 125: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSetColumnType (&yylhsminor.yy451, &yymsp[-1].minor.yy0); + tSetColumnType (&yylhsminor.yy407, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy451 = yylhsminor.yy451; + yymsp[-1].minor.yy407 = yylhsminor.yy407; break; case 126: /* signed ::= INTEGER */ -{ yylhsminor.yy481 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy481 = yylhsminor.yy481; +{ yylhsminor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy317 = yylhsminor.yy317; break; case 127: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy481 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 128: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy481 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy317 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; case 132: /* cmd ::= CREATE TABLE create_table_list */ -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy194;} +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy14;} break; case 133: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, 
sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy252); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy206); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy194 = pCreateTable; + yylhsminor.yy14 = pCreateTable; } - yymsp[0].minor.yy194 = yylhsminor.yy194; + yymsp[0].minor.yy14 = yylhsminor.yy14; break; case 134: /* create_table_list ::= create_table_list create_from_stable */ { - taosArrayPush(yymsp[-1].minor.yy194->childTableInfo, &yymsp[0].minor.yy252); - yylhsminor.yy194 = yymsp[-1].minor.yy194; + taosArrayPush(yymsp[-1].minor.yy14->childTableInfo, &yymsp[0].minor.yy206); + yylhsminor.yy14 = yymsp[-1].minor.yy14; } - yymsp[-1].minor.yy194 = yylhsminor.yy194; + yymsp[-1].minor.yy14 = yylhsminor.yy14; break; case 135: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yylhsminor.yy194 = tSetCreateTableInfo(yymsp[-1].minor.yy429, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-1].minor.yy159, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy194 = yylhsminor.yy194; + yymsp[-5].minor.yy14 = yylhsminor.yy14; break; case 136: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yylhsminor.yy194 = tSetCreateTableInfo(yymsp[-5].minor.yy429, yymsp[-1].minor.yy429, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-5].minor.yy159, yymsp[-1].minor.yy159, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy194 = yylhsminor.yy194; + yymsp[-9].minor.yy14 = yylhsminor.yy14; break; case 137: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy252 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy429, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy206 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy159, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy252 = yylhsminor.yy252; + yymsp[-9].minor.yy206 = yylhsminor.yy206; break; case 138: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; - yylhsminor.yy252 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy429, yymsp[-1].minor.yy429, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); + yylhsminor.yy206 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy159, yymsp[-1].minor.yy159, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } - yymsp[-12].minor.yy252 = yylhsminor.yy252; + yymsp[-12].minor.yy206 = yylhsminor.yy206; break; case 139: /* tagNamelist ::= tagNamelist COMMA ids */ -{taosArrayPush(yymsp[-2].minor.yy429, &yymsp[0].minor.yy0); yylhsminor.yy429 = yymsp[-2].minor.yy429; } - yymsp[-2].minor.yy429 = yylhsminor.yy429; 
+{taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy0); yylhsminor.yy159 = yymsp[-2].minor.yy159; } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 140: /* tagNamelist ::= ids */ -{yylhsminor.yy429 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy429, &yymsp[0].minor.yy0);} - yymsp[0].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 141: /* create_table_args ::= ifnotexists ids cpxName AS select */ { - yylhsminor.yy194 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy254, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy116, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy194 = yylhsminor.yy194; + yymsp[-4].minor.yy14 = yylhsminor.yy14; break; case 142: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy429, &yymsp[0].minor.yy451); yylhsminor.yy429 = yymsp[-2].minor.yy429; } - yymsp[-2].minor.yy429 = yylhsminor.yy429; +{taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy407); yylhsminor.yy159 = yymsp[-2].minor.yy159; } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 143: /* columnlist ::= column */ -{yylhsminor.yy429 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy429, &yymsp[0].minor.yy451);} - yymsp[0].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy407);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 144: /* column ::= ids typename */ { - tSetColumnInfo(&yylhsminor.yy451, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy451); + tSetColumnInfo(&yylhsminor.yy407, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy407); } - yymsp[-1].minor.yy451 = yylhsminor.yy451; + yymsp[-1].minor.yy407 = yylhsminor.yy407; break; case 145: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy429 = tVariantListAppend(yymsp[-2].minor.yy429, &yymsp[0].minor.yy218, -1); } - yymsp[-2].minor.yy429 = yylhsminor.yy429; +{ yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 146: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[0].minor.yy218, -1); } - yymsp[0].minor.yy429 = yylhsminor.yy429; +{ yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 147: /* tagitem ::= INTEGER */ case 148: /* tagitem ::= FLOAT */ yytestcase(yyruleno==148); case 149: /* tagitem ::= STRING */ yytestcase(yyruleno==149); case 150: /* tagitem ::= BOOL */ yytestcase(yyruleno==150); -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy218, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy218 = yylhsminor.yy218; +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy488 = yylhsminor.yy488; break; case 151: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy218, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy218 = yylhsminor.yy218; +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } + 
yymsp[0].minor.yy488 = yylhsminor.yy488; break; case 152: /* tagitem ::= MINUS INTEGER */ case 153: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==153); @@ -2883,56 +2599,56 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy218, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy488, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy218 = yylhsminor.yy218; + yymsp[-1].minor.yy488 = yylhsminor.yy488; break; case 156: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy429, yymsp[-10].minor.yy70, yymsp[-9].minor.yy170, yymsp[-4].minor.yy429, yymsp[-3].minor.yy429, &yymsp[-8].minor.yy220, &yymsp[-7].minor.yy87, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy429, &yymsp[0].minor.yy18, &yymsp[-1].minor.yy18, yymsp[-2].minor.yy170); + yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy159, yymsp[-10].minor.yy236, yymsp[-9].minor.yy118, yymsp[-4].minor.yy159, yymsp[-3].minor.yy159, &yymsp[-8].minor.yy184, &yymsp[-7].minor.yy249, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy159, &yymsp[0].minor.yy440, &yymsp[-1].minor.yy440, yymsp[-2].minor.yy118); } - yymsp[-12].minor.yy254 = yylhsminor.yy254; + yymsp[-12].minor.yy116 = yylhsminor.yy116; break; case 157: /* select ::= LP select RP */ -{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} +{yymsp[-2].minor.yy116 = yymsp[-1].minor.yy116;} break; case 158: /* union ::= select */ -{ yylhsminor.yy141 = setSubclause(NULL, yymsp[0].minor.yy254); } - yymsp[0].minor.yy141 = yylhsminor.yy141; +{ yylhsminor.yy159 = setSubclause(NULL, yymsp[0].minor.yy116); } + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 159: /* union ::= union UNION ALL select */ -{ yylhsminor.yy141 = appendSelectClause(yymsp[-3].minor.yy141, yymsp[0].minor.yy254); } - yymsp[-3].minor.yy141 = yylhsminor.yy141; +{ yylhsminor.yy159 = appendSelectClause(yymsp[-3].minor.yy159, yymsp[0].minor.yy116); } + yymsp[-3].minor.yy159 = yylhsminor.yy159; break; case 160: /* cmd ::= union */ -{ setSqlInfo(pInfo, yymsp[0].minor.yy141, NULL, TSDB_SQL_SELECT); } +{ setSqlInfo(pInfo, yymsp[0].minor.yy159, NULL, TSDB_SQL_SELECT); } break; case 161: /* select ::= SELECT selcollist */ { - yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy429, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy116 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy159, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy254 = yylhsminor.yy254; + yymsp[-1].minor.yy116 = yylhsminor.yy116; break; case 162: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy429 = yymsp[-1].minor.yy429;} - yymsp[-1].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = yymsp[-1].minor.yy159;} + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 163: /* sclp ::= */ case 188: /* orderby_opt ::= */ yytestcase(yyruleno==188); -{yymsp[1].minor.yy429 = 0;} +{yymsp[1].minor.yy159 = 0;} break; case 164: /* selcollist ::= sclp distinct expr as */ { - yylhsminor.yy429 = tSqlExprListAppend(yymsp[-3].minor.yy429, yymsp[-1].minor.yy170, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy159 = tSqlExprListAppend(yymsp[-3].minor.yy159, yymsp[-1].minor.yy118, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-3].minor.yy429 = yylhsminor.yy429; + yymsp[-3].minor.yy159 = yylhsminor.yy159; break; case 165: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); - yylhsminor.yy429 = tSqlExprListAppend(yymsp[-1].minor.yy429, pNode, 0, 0); + yylhsminor.yy159 = tSqlExprListAppend(yymsp[-1].minor.yy159, pNode, 0, 0); } - yymsp[-1].minor.yy429 = yylhsminor.yy429; + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 166: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } @@ -2949,71 +2665,64 @@ static YYACTIONTYPE yy_reduce( yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 171: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy70 = yymsp[0].minor.yy429;} +{yymsp[-1].minor.yy236 = yymsp[0].minor.yy236;} break; case 172: /* from ::= FROM LP union RP */ -{yymsp[-3].minor.yy70 = yymsp[-1].minor.yy141;} +{yymsp[-3].minor.yy236 = setSubquery(NULL, yymsp[-1].minor.yy159);} break; case 173: /* tablelist ::= ids cpxName */ { - toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy429 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy236 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } - yymsp[-1].minor.yy429 = yylhsminor.yy429; + yymsp[-1].minor.yy236 = yylhsminor.yy236; break; case 174: /* tablelist ::= ids cpxName ids */ { - toTSDBType(yymsp[-2].minor.yy0.type); - toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy429 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy236 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy429 = yylhsminor.yy429; + yymsp[-2].minor.yy236 = yylhsminor.yy236; break; case 175: /* tablelist ::= tablelist COMMA ids cpxName */ { - toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy429 = setTableNameList(yymsp[-3].minor.yy429, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy236 = setTableNameList(yymsp[-3].minor.yy236, &yymsp[-1].minor.yy0, NULL); } - yymsp[-3].minor.yy429 = yylhsminor.yy429; + yymsp[-3].minor.yy236 = yylhsminor.yy236; break; case 176: /* tablelist ::= tablelist COMMA ids cpxName ids */ { - toTSDBType(yymsp[-2].minor.yy0.type); - toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - - yylhsminor.yy429 = setTableNameList(yymsp[-4].minor.yy429, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy236 = setTableNameList(yymsp[-4].minor.yy236, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-4].minor.yy429 = yylhsminor.yy429; + yymsp[-4].minor.yy236 = yylhsminor.yy236; break; case 177: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 178: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy220.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy220.offset.n = 0;} +{yymsp[-3].minor.yy184.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy184.offset.n = 0;} break; case 179: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy220.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy220.offset = yymsp[-1].minor.yy0;} +{yymsp[-5].minor.yy184.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy184.offset = 
yymsp[-1].minor.yy0;} break; case 180: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy220, 0, sizeof(yymsp[1].minor.yy220));} +{memset(&yymsp[1].minor.yy184, 0, sizeof(yymsp[1].minor.yy184));} break; case 181: /* session_option ::= */ -{yymsp[1].minor.yy87.col.n = 0; yymsp[1].minor.yy87.gap.n = 0;} +{yymsp[1].minor.yy249.col.n = 0; yymsp[1].minor.yy249.gap.n = 0;} break; case 182: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - yymsp[-6].minor.yy87.col = yymsp[-4].minor.yy0; - yymsp[-6].minor.yy87.gap = yymsp[-1].minor.yy0; + yymsp[-6].minor.yy249.col = yymsp[-4].minor.yy0; + yymsp[-6].minor.yy249.gap = yymsp[-1].minor.yy0; } break; case 183: /* fill_opt ::= */ -{ yymsp[1].minor.yy429 = 0; } +{ yymsp[1].minor.yy159 = 0; } break; case 184: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { @@ -3021,14 +2730,14 @@ static YYACTIONTYPE yy_reduce( toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy429, &A, -1, 0); - yymsp[-5].minor.yy429 = yymsp[-1].minor.yy429; + tVariantListInsert(yymsp[-1].minor.yy159, &A, -1, 0); + yymsp[-5].minor.yy159 = yymsp[-1].minor.yy159; } break; case 185: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy429 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy159 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; case 186: /* sliding_opt ::= SLIDING LP tmvar RP */ @@ -3038,245 +2747,250 @@ static YYACTIONTYPE yy_reduce( {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; case 189: /* orderby_opt ::= ORDER BY sortlist */ -{yymsp[-2].minor.yy429 = yymsp[0].minor.yy429;} +{yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; case 190: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy429 = tVariantListAppend(yymsp[-3].minor.yy429, &yymsp[-1].minor.yy218, yymsp[0].minor.yy116); + yylhsminor.yy159 = tVariantListAppend(yymsp[-3].minor.yy159, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } - yymsp[-3].minor.yy429 = yylhsminor.yy429; + yymsp[-3].minor.yy159 = yylhsminor.yy159; break; case 191: /* sortlist ::= item sortorder */ { - yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[-1].minor.yy218, yymsp[0].minor.yy116); + yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } - yymsp[-1].minor.yy429 = yylhsminor.yy429; + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 192: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy218, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy488, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy218 = yylhsminor.yy218; + yymsp[-1].minor.yy488 = yylhsminor.yy488; break; case 193: /* sortorder ::= ASC */ -{ yymsp[0].minor.yy116 = TSDB_ORDER_ASC; } +{ yymsp[0].minor.yy20 = TSDB_ORDER_ASC; } break; case 194: /* sortorder ::= DESC */ -{ yymsp[0].minor.yy116 = TSDB_ORDER_DESC;} +{ yymsp[0].minor.yy20 = TSDB_ORDER_DESC;} break; case 195: /* sortorder ::= */ -{ yymsp[1].minor.yy116 = TSDB_ORDER_ASC; } +{ yymsp[1].minor.yy20 = TSDB_ORDER_ASC; } break; case 196: /* groupby_opt ::= */ -{ yymsp[1].minor.yy429 = 0;} +{ yymsp[1].minor.yy159 = 0;} break; case 197: /* groupby_opt ::= GROUP BY grouplist */ -{ yymsp[-2].minor.yy429 = yymsp[0].minor.yy429;} +{ yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; case 198: /* grouplist ::= 
grouplist COMMA item */ { - yylhsminor.yy429 = tVariantListAppend(yymsp[-2].minor.yy429, &yymsp[0].minor.yy218, -1); + yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } - yymsp[-2].minor.yy429 = yylhsminor.yy429; + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 199: /* grouplist ::= item */ { - yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[0].minor.yy218, -1); + yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } - yymsp[0].minor.yy429 = yylhsminor.yy429; + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 200: /* having_opt ::= */ case 210: /* where_opt ::= */ yytestcase(yyruleno==210); - case 250: /* expritem ::= */ yytestcase(yyruleno==250); -{yymsp[1].minor.yy170 = 0;} + case 252: /* expritem ::= */ yytestcase(yyruleno==252); +{yymsp[1].minor.yy118 = 0;} break; case 201: /* having_opt ::= HAVING expr */ case 211: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==211); -{yymsp[-1].minor.yy170 = yymsp[0].minor.yy170;} +{yymsp[-1].minor.yy118 = yymsp[0].minor.yy118;} break; case 202: /* limit_opt ::= */ case 206: /* slimit_opt ::= */ yytestcase(yyruleno==206); -{yymsp[1].minor.yy18.limit = -1; yymsp[1].minor.yy18.offset = 0;} +{yymsp[1].minor.yy440.limit = -1; yymsp[1].minor.yy440.offset = 0;} break; case 203: /* limit_opt ::= LIMIT signed */ case 207: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==207); -{yymsp[-1].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-1].minor.yy18.offset = 0;} +{yymsp[-1].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-1].minor.yy440.offset = 0;} break; case 204: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy18.limit = yymsp[-2].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[0].minor.yy481;} +{ yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; case 205: /* limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[-2].minor.yy481;} +{ yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; case 208: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy18.limit = yymsp[-2].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[0].minor.yy481;} +{yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; case 209: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[-2].minor.yy481;} +{yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; case 212: /* expr ::= LP expr RP */ -{yylhsminor.yy170 = yymsp[-1].minor.yy170; yylhsminor.yy170->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy170->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{yylhsminor.yy118 = yymsp[-1].minor.yy118; yylhsminor.yy118->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy118->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 213: /* expr ::= ID */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 214: /* expr ::= ID DOT ID */ -{ yymsp[-2].minor.yy0.n += 
(1+yymsp[0].minor.yy0.n); yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 215: /* expr ::= ID DOT STAR */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 216: /* expr ::= INTEGER */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 217: /* expr ::= MINUS INTEGER */ case 218: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==218); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy170 = yylhsminor.yy170; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy118 = yylhsminor.yy118; break; case 219: /* expr ::= FLOAT */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 220: /* expr ::= MINUS FLOAT */ case 221: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==221); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy170 = yylhsminor.yy170; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy118 = yylhsminor.yy118; break; case 222: /* expr ::= STRING */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 223: /* expr ::= NOW */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 224: /* expr ::= VARIABLE */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy118 = yylhsminor.yy118; + break; + case 225: /* expr ::= PLUS VARIABLE */ + case 226: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==226); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} + yymsp[-1].minor.yy118 = yylhsminor.yy118; break; - case 225: /* expr ::= BOOL */ -{ yylhsminor.yy170 = 
tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy170 = yylhsminor.yy170; + case 227: /* expr ::= BOOL */ +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 226: /* expr ::= NULL */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} - yymsp[0].minor.yy170 = yylhsminor.yy170; + case 228: /* expr ::= NULL */ +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 227: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy170 = tSqlExprCreateFunction(yymsp[-1].minor.yy429, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 229: /* expr ::= ID LP exprlist RP */ +{ yylhsminor.yy118 = tSqlExprCreateFunction(yymsp[-1].minor.yy159, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 228: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy170 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 230: /* expr ::= ID LP STAR RP */ +{ yylhsminor.yy118 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 229: /* expr ::= expr IS NULL */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, NULL, TK_ISNULL);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 231: /* expr ::= expr IS NULL */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, NULL, TK_ISNULL);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 230: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-3].minor.yy170, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 232: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-3].minor.yy118, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 231: /* expr ::= expr LT expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LT);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 233: /* expr ::= expr LT expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LT);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 232: /* expr ::= expr GT expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_GT);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 234: /* expr ::= expr GT expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GT);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 233: /* expr ::= expr LE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 235: /* expr ::= expr LE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 234: /* expr ::= expr GE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_GE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 236: /* expr ::= expr GE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 
235: /* expr ::= expr NE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_NE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 237: /* expr ::= expr NE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_NE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 236: /* expr ::= expr EQ expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_EQ);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 238: /* expr ::= expr EQ expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_EQ);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 237: /* expr ::= expr BETWEEN expr AND expr */ -{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy170); yylhsminor.yy170 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy170, yymsp[-2].minor.yy170, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy170, TK_LE), TK_AND);} - yymsp[-4].minor.yy170 = yylhsminor.yy170; + case 239: /* expr ::= expr BETWEEN expr AND expr */ +{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy118); yylhsminor.yy118 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy118, yymsp[-2].minor.yy118, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy118, TK_LE), TK_AND);} + yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 238: /* expr ::= expr AND expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_AND);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 240: /* expr ::= expr AND expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_AND);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 239: /* expr ::= expr OR expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_OR); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 241: /* expr ::= expr OR expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_OR); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 240: /* expr ::= expr PLUS expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_PLUS); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 242: /* expr ::= expr PLUS expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_PLUS); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 241: /* expr ::= expr MINUS expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_MINUS); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 243: /* expr ::= expr MINUS expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_MINUS); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 242: /* expr ::= expr STAR expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_STAR); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 244: /* expr ::= expr STAR expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_STAR); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 243: /* expr ::= expr SLASH expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_DIVIDE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 245: /* expr ::= expr SLASH expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_DIVIDE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 244: /* expr ::= expr 
REM expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_REM); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 246: /* expr ::= expr REM expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_REM); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 245: /* expr ::= expr LIKE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LIKE); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 247: /* expr ::= expr LIKE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LIKE); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 246: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-4].minor.yy170, (tSqlExpr*)yymsp[-1].minor.yy429, TK_IN); } - yymsp[-4].minor.yy170 = yylhsminor.yy170; + case 248: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-4].minor.yy118, (tSqlExpr*)yymsp[-1].minor.yy159, TK_IN); } + yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 247: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy429 = tSqlExprListAppend(yymsp[-2].minor.yy429,yymsp[0].minor.yy170,0, 0);} - yymsp[-2].minor.yy429 = yylhsminor.yy429; + case 249: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy159 = tSqlExprListAppend(yymsp[-2].minor.yy159,yymsp[0].minor.yy118,0, 0);} + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 248: /* exprlist ::= expritem */ -{yylhsminor.yy429 = tSqlExprListAppend(0,yymsp[0].minor.yy170,0, 0);} - yymsp[0].minor.yy429 = yylhsminor.yy429; + case 250: /* exprlist ::= expritem */ +{yylhsminor.yy159 = tSqlExprListAppend(0,yymsp[0].minor.yy118,0, 0);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 249: /* expritem ::= expr */ -{yylhsminor.yy170 = yymsp[0].minor.yy170;} - yymsp[0].minor.yy170 = yylhsminor.yy170; + case 251: /* expritem ::= expr */ +{yylhsminor.yy118 = yymsp[0].minor.yy118;} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 251: /* cmd ::= RESET QUERY CACHE */ + case 253: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 252: /* cmd ::= SYNCDB ids REPLICA */ + case 254: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} break; - case 253: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 255: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 254: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 256: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3287,14 +3001,14 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 255: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 257: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + 
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 256: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 258: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3305,7 +3019,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 257: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 259: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3319,26 +3033,26 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 258: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 260: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy218, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy488, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 259: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 261: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 260: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 262: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3349,14 +3063,14 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 261: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 263: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 262: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 264: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3367,7 +3081,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 263: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 265: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3381,22 +3095,22 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 264: /* cmd ::= KILL CONNECTION INTEGER */ + case 266: /* cmd ::= KILL 
CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 265: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 267: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 266: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 268: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyrulenostateno = (YYACTIONTYPE)yyact; yymsp->major = (YYCODETYPE)yygoto; yyTraceShift(yypParser, yyact, "... then shift"); - return yyact; } /* @@ -3421,8 +3134,7 @@ static YYACTIONTYPE yy_reduce( static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH - ParseCTX_FETCH + ParseARG_FETCH; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); @@ -3433,8 +3145,7 @@ static void yy_parse_failed( ** parser fails */ /************ Begin %parse_failure code ***************************************/ /************ End %parse_failure code *****************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ } #endif /* YYNOERRORRECOVERY */ @@ -3446,8 +3157,7 @@ static void yy_syntax_error( int yymajor, /* The major type of the error token */ ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ - ParseARG_FETCH - ParseCTX_FETCH + ParseARG_FETCH; #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ @@ -3473,8 +3183,7 @@ static void yy_syntax_error( assert(len <= outputBufLen); /************ End %syntax_error code ******************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ } /* @@ -3483,8 +3192,7 @@ static void yy_syntax_error( static void yy_accept( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH - ParseCTX_FETCH + ParseARG_FETCH; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); @@ -3499,8 +3207,7 @@ static void yy_accept( /*********** Begin %parse_accept code *****************************************/ /*********** End %parse_accept code *******************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ } /* The main parser program. @@ -3529,47 +3236,45 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - YYACTIONTYPE yyact; /* The parser action. */ + unsigned int yyact; /* The parser action. 
*/ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif #ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ #endif - yyParser *yypParser = (yyParser*)yyp; /* The parser */ - ParseCTX_FETCH - ParseARG_STORE + yyParser *yypParser; /* The parser */ + yypParser = (yyParser*)yyp; assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif + ParseARG_STORE; - yyact = yypParser->yytos->stateno; #ifndef NDEBUG if( yyTraceFILE ){ - if( yyact < YY_MIN_REDUCE ){ + int stateno = yypParser->yytos->stateno; + if( stateno < YY_MIN_REDUCE ){ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", - yyTracePrompt,yyTokenName[yymajor],yyact); + yyTracePrompt,yyTokenName[yymajor],stateno); }else{ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", - yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); + yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); } } #endif do{ - assert( yyact==yypParser->yytos->stateno ); - yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); + yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); if( yyact >= YY_MIN_REDUCE ){ - yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, - yyminor ParseCTX_PARAM); + yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ - yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); + yy_shift(yypParser,yyact,yymajor,yyminor); #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; #endif - break; + yymajor = YYNOCODE; }else if( yyact==YY_ACCEPT_ACTION ){ yypParser->yytos--; yy_accept(yypParser); @@ -3620,9 +3325,10 @@ void Parse( yymajor = YYNOCODE; }else{ while( yypParser->yytos >= yypParser->yystack + && yymx != YYERRORSYMBOL && (yyact = yy_find_reduce_action( yypParser->yytos->stateno, - YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE + YYERRORSYMBOL)) >= YY_MIN_REDUCE ){ yy_pop_parser_stack(yypParser); } @@ -3639,8 +3345,6 @@ void Parse( } yypParser->yyerrcnt = 3; yyerrorhit = 1; - if( yymajor==YYNOCODE ) break; - yyact = yypParser->yytos->stateno; #elif defined(YYNOERRORRECOVERY) /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to ** do any kind of error recovery. Instead, simply invoke the syntax @@ -3651,7 +3355,8 @@ void Parse( */ yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - break; + yymajor = YYNOCODE; + #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** @@ -3673,10 +3378,10 @@ void Parse( yypParser->yyerrcnt = -1; #endif } - break; + yymajor = YYNOCODE; #endif } - }while( yypParser->yytos>yypParser->yystack ); + }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; @@ -3691,17 +3396,3 @@ void Parse( #endif return; } - -/* -** Return the fallback token corresponding to canonical token iToken, or -** 0 if iToken has no fallback. 
-*/ -int ParseFallback(int iToken){ -#ifdef YYFALLBACK - assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); - return yyFallback[iToken]; -#else - (void)iToken; - return 0; -#endif -} diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 7205bafd7d01ede8f11233e7554b7082ae3d53c7..db3c72c2fc8a1a1c326c37974bfde0741f93004b 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime*30); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); @@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { taosTmrStopA(&pConn->pTimer); // set the idle timer to monitor the activity - taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime*30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); + taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); rpcSendMsgToPeer(pConn, msg, msgLen); // if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured @@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont } if ( rpcIsReq(pHead->msgType) ) { - terrno = rpcProcessReqHead(pConn, pHead); pConn->connType = pRecv->connType; + terrno = rpcProcessReqHead(pConn, pHead); // stop idle timer taosTmrStopA(&pConn->pIdleTimer); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index cd97b2a9d6d5b8a29bd9670a5061c4f0af57d7d4..9df25409de62f9b2579d43800fd6a5709bd41f1a 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -123,7 +123,7 @@ typedef struct STsdbQueryHandle { SMemRef *pMemRef; SArray *defaultLoadColumn;// default load column SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */ - SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */ + SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQueryAttr */ SArray *prev; // previous row which is before than time window SArray *next; // next row which is after the query time window @@ -286,7 +286,7 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa } taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %"PRIu64, pQueryHandle, info.tableId.uid, + tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" 0x%"PRIx64, pQueryHandle, info.tableId.uid, info.tableId.tid, info.lastKey, pQueryHandle->qId); } } @@ -440,7 +440,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable tsdbMayTakeMemSnapshot(pQueryHandle, psTable); - tsdbDebug("%p total numOfTable:%" PRIzu " in query, %"PRIu64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId); + tsdbDebug("%p total numOfTable:%" PRIzu " in query, 0x%"PRIx64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId); return (TsdbQueryHandleT) pQueryHandle; } @@ -651,7 +651,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" 
PRId64 - "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64, + "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, pCheckInfo->lastKey, pMem->numOfRows, pHandle->qId); @@ -662,7 +662,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh } } else { - tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, + tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qId); } @@ -673,7 +673,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 - "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64, + "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qId); @@ -683,7 +683,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh assert(pCheckInfo->lastKey >= key); } } else { - tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, + tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qId); } @@ -811,7 +811,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { } pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %"PRIu64, pHandle, + tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId); // all data in mem are checked already. 
@@ -986,21 +986,21 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc STSchema *pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); int32_t code = tdInitDataCols(pQueryHandle->pDataCols, pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); + tsdbError("%p failed to malloc buf for pDataCols, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } code = tdInitDataCols(pQueryHandle->rhelper.pDCols[0], pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %"PRIu64, pQueryHandle, pQueryHandle->qId); + tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } code = tdInitDataCols(pQueryHandle->rhelper.pDCols[1], pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %"PRIu64, pQueryHandle, pQueryHandle->qId); + tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } @@ -1036,14 +1036,14 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc int64_t elapsedTime = (taosGetTimestampUs() - st); pQueryHandle->cost.blockLoadTime += elapsedTime; - tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %"PRIu64, + tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, 0x%"PRIx64, pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qId); return TSDB_CODE_SUCCESS; _error: pBlock->numOfRows = 0; - tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %"PRIu64, + tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, 0x%"PRIx64, pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qId); return terrno; } @@ -1066,7 +1066,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p assert(cur->pos >= 0 && cur->pos <= binfo.rows); TSKEY key = (row != NULL)? 
dataRowKey(row):TSKEY_INITIAL_VAL; - tsdbDebug("%p key in mem:%"PRId64", %"PRIu64, pQueryHandle, key, pQueryHandle->qId); + tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId); if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { @@ -1406,7 +1406,11 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, SET_DOUBLE_PTR(pData, value); break; case TSDB_DATA_TYPE_TIMESTAMP: - *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + } else { + *(TSKEY *)pData = *(TSKEY *)value; + } break; default: memcpy(pData, value, pColInfo->info.bytes); @@ -1547,7 +1551,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos); doCheckGeneratedBlockRange(pQueryHandle); - tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64, + tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, 0x%"PRIx64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey, cur->win.ekey, cur->rows, pQueryHandle->qId); } @@ -1601,7 +1605,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo); tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d," - "end:%d, %"PRIu64, + "end:%d, 0x%"PRIx64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, cur->pos, endPos, pQueryHandle->qId); @@ -1743,7 +1747,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos); doCheckGeneratedBlockRange(pQueryHandle); - tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64, + tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, 0x%"PRIx64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey, cur->win.ekey, cur->rows, pQueryHandle->qId); } @@ -1919,12 +1923,12 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks); cleanBlockOrderSupporter(&sup, numOfQualTables); - tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %"PRIu64, pQueryHandle, cnt, + tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted 0x%"PRIx64, pQueryHandle, cnt, pQueryHandle->qId); return TSDB_CODE_SUCCESS; } - tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %"PRIu64, pQueryHandle, cnt, + tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables 0x%"PRIx64, pQueryHandle, cnt, numOfQualTables, pQueryHandle->qId); assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0 @@ -1961,7 +1965,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO * } */ - 
tsdbDebug("%p %d data blocks sort completed, %"PRIu64, pQueryHandle, cnt, pQueryHandle->qId); + tsdbDebug("%p %d data blocks sort completed, 0x%"PRIx64, pQueryHandle, cnt, pQueryHandle->qId); cleanBlockOrderSupporter(&sup, numOfTables); free(pTree); @@ -2019,7 +2023,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) { tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); - tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle, + tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle, pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId); pQueryHandle->pFileGroup = NULL; assert(pQueryHandle->numOfBlocks == 0); @@ -2043,7 +2047,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist break; } - tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables, + tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, 0x%"PRIx64, pQueryHandle, numOfBlocks, numOfTables, pQueryHandle->pFileGroup->fid, pQueryHandle->qId); assert(numOfBlocks >= 0); @@ -2135,7 +2139,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) { tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); - tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle, + tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle, pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId); pQueryHandle->pFileGroup = NULL; break; @@ -2159,7 +2163,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist break; } - tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables, + tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, 0x%"PRIx64, pQueryHandle, numOfBlocks, numOfTables, pQueryHandle->pFileGroup->fid, pQueryHandle->qId); if (numOfBlocks == 0) { @@ -2207,7 +2211,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists if ((!cur->mixBlock) || cur->blockCompleted) { // all data blocks in current file has been checked already, try next file if exists } else { - tsdbDebug("%p continue in current data block, index:%d, pos:%d, %"PRIu64, pQueryHandle, cur->slot, cur->pos, + tsdbDebug("%p continue in current data block, index:%d, pos:%d, 0x%"PRIx64, pQueryHandle, cur->slot, cur->pos, pQueryHandle->qId); int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); *exists = (pQueryHandle->realNumOfRows > 0); @@ -2336,7 +2340,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } int64_t elapsedTime = taosGetTimestampUs() - st; - tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %"PRIu64, pQueryHandle, + tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, 0x%"PRIx64, pQueryHandle, elapsedTime, numOfRows, numOfCols, 
pQueryHandle->qId); return numOfRows; @@ -3206,9 +3210,10 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%u", tsdb, pGroupInfo->numOfTables); - taosArrayDestroy(res); + tsdbDebug("%p no table name/tag condition, all tables qualified, numOfTables:%u, group:%zu", tsdb, + pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); + taosArrayDestroy(res); if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error; return ret; } @@ -3391,7 +3396,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); SIOCostSummary* pCost = &pQueryHandle->cost; - tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %"PRIu64, + tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); tfree(pQueryHandle); @@ -3408,14 +3413,16 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { size_t numOfTables = taosArrayGetSize(p); for(int32_t j = 0; j < numOfTables; ++j) { STable* pTable = taosArrayGetP(p, j); - assert(pTable != NULL); - - tsdbUnRefTable(pTable); + if (pTable != NULL) { // in case of handling retrieve data from tsdb + tsdbUnRefTable(pTable); + } + //assert(pTable != NULL); } taosArrayDestroy(p); } + taosHashCleanup(pGroupList->map); taosArrayDestroy(pGroupList->pGroupList); pGroupList->numOfTables = 0; } @@ -3426,7 +3433,7 @@ static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SA // Scan each node in the skiplist by using iterator while (tSkipListIterNext(iter)) { SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplayFilter(pExpr, pNode, param)) { + if (exprTreeApplyFilter(pExpr, pNode, param)) { taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); } } diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 4d18ef14e2a1985259b11ff65391616c9d4706b2..354e7899c27d9af12f107964b067311fce10c84f 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -398,6 +398,10 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { case TSDB_DATA_TYPE_SMALLINT: DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2)); case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2)); + case TSDB_DATA_TYPE_UTINYINT: DEFAULT_COMP(GET_UINT8_VAL(f1), GET_UINT8_VAL(f2)); + case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2)); + case TSDB_DATA_TYPE_UINT: DEFAULT_COMP(GET_UINT32_VAL(f1), GET_UINT32_VAL(f2)); + case TSDB_DATA_TYPE_UBIGINT: DEFAULT_COMP(GET_UINT64_VAL(f1), GET_UINT64_VAL(f2)); case TSDB_DATA_TYPE_NCHAR: { tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; diff --git a/src/util/src/tcrc32c.c b/src/util/src/tcrc32c.c index 502116f9c27be1900f1c02d4bc9f2281fd9235af..054b8f8171e5f0f2b64edfb934450902c42662c9 100644 --- a/src/util/src/tcrc32c.c +++ b/src/util/src/tcrc32c.c @@ -736,7 +736,7 @@ static uint32_t table[16][256] = { 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa} }; -#ifndef _TD_ARM_ +#if !defined(_TD_ARM_) && !defined(_TD_MIPS_) static uint32_t long_shifts[4][256] = { {0x00000000, 0xe040e0ac, 
0xc56db7a9, 0x252d5705, 0x8f3719a3, 0x6f77f90f, 0x4a5aae0a, 0xaa1a4ea6, 0x1b8245b7, 0xfbc2a51b, 0xdeeff21e, 0x3eaf12b2, @@ -1187,7 +1187,7 @@ uint32_t crc32c_sf(uint32_t crci, crc_stream input, size_t length) { } return (uint32_t)crc ^ 0xffffffff; } -#ifndef _TD_ARM_ +#if !defined(_TD_ARM_) && !defined(_TD_MIPS_) /* Apply the zeros operator table to crc. */ static uint32_t shift_crc(uint32_t shift_table[][256], uint32_t crc) { return shift_table[0][crc & 0xff] ^ shift_table[1][(crc >> 8) & 0xff] ^ @@ -1198,7 +1198,7 @@ static uint32_t shift_crc(uint32_t shift_table[][256], uint32_t crc) { version. Otherwise, use the software version. */ uint32_t (*crc32c)(uint32_t crci, crc_stream bytes, size_t len) = crc32c_sf; -#ifndef _TD_ARM_ +#if !defined(_TD_ARM_) && !defined(_TD_MIPS_) /* Compute CRC-32C using the Intel hardware instruction. */ uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) { crc_stream next = buf; @@ -1353,7 +1353,7 @@ uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) { #endif // #ifndef _TD_ARM_ void taosResolveCRC() { -#if defined _TD_ARM_ || defined WINDOWS +#if defined _TD_ARM_ || defined _TD_MIPS_ || defined WINDOWS crc32c = crc32c_sf; #else int sse42; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 448bb3ecd1bfa8ef17bd7c87a464c095e1093d6d..7f127fc396a13f0a7796dcb4ce1dd63ce96cb951 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -416,7 +416,8 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { } } - if (dflag & DEBUG_SCREEN) taosWrite(1, buffer, (uint32_t)len); + if (dflag & DEBUG_SCREEN) + taosWrite(1, buffer, (uint32_t)len); if (dflag == 255) nInfo(buffer, len); } diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 28abad356c156182b23feebd8d2e80ea4c914e52..131063b0de8b456ebd74011f74f95d199695b75b 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -291,16 +291,16 @@ static void taosNetCheckPort(uint32_t hostIp, int32_t startPort, int32_t endPort info.port = port; ret = taosNetCheckTcpPort(&info); if (ret != 0) { - uError("failed to test TCP port:%d", port); + printf("failed to test TCP port:%d\n", port); } else { - uInfo("successed to test TCP port:%d", port); + printf("successed to test TCP port:%d\n", port); } ret = taosNetCheckUdpPort(&info); if (ret != 0) { - uError("failed to test UDP port:%d", port); + printf("failed to test UDP port:%d\n", port); } else { - uInfo("successed to test UDP port:%d", port); + printf("successed to test UDP port:%d\n", port); } } } @@ -464,9 +464,9 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) { int32_t ret = taosNetCheckRpc(host, port, sendpkgLen, spi, NULL); if (ret < 0) { - uError("failed to test TCP port:%d", port); + printf("failed to test TCP port:%d\n", port); } else { - uInfo("successed to test TCP port:%d", port); + printf("successed to test TCP port:%d\n", port); } if (pkgLen >= tsRpcMaxUdpSize) { @@ -477,9 +477,9 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) { ret = taosNetCheckRpc(host, port, pkgLen, spi, NULL); if (ret < 0) { - uError("failed to test UDP port:%d", port); + printf("failed to test UDP port:%d\n", port); } else { - uInfo("successed to test UDP port:%d", port); + printf("successed to test UDP port:%d\n", port); } } @@ -539,7 +539,7 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) { } void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { - tscEmbedded = 1; +// tscEmbedded = 1; if (host == 
NULL) host = tsLocalFqdn; if (port == 0) port = tsServerPort; if (pkgLen <= 10) pkgLen = 1000; @@ -559,5 +559,5 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { taosNetTestStartup(host, port); } - tscEmbedded = 0; +// tscEmbedded = 0; } diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c index 98334497772d60991a553b5d929404ce5e3437e4..865e1159c1995b2796682d64ee06de02442b7a25 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -538,7 +538,7 @@ static void taosTmrModuleInit(void) { void* taosTmrInit(int maxNumOfTmrs, int resolution, int longest, const char* label) { const char* ret = monotonicInit(); - tmrInfo("ttimer monotonic clock source:%s", ret); + tmrDebug("ttimer monotonic clock source:%s", ret); pthread_once(&tmrModuleInit, taosTmrModuleInit); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index ee8ed9e0fa3507363f04841060011de85422efec..0921c5ce48d3cd525bb4d741585de10425e10b76 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -232,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) { return code; } +static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) { + char vnodeDir[TSDB_FILENAME_LEN] = "\0"; + snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId); + + TDIR *tdir = tfsOpendir(vnodeDir); + if (!tdir) return; + + const TFILE *tfile = tfsReaddir(tdir); + if (!tfile) { + tfsClosedir(tdir); + return; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId); + + tfsClosedir(tdir); +} + int32_t vnodeOpen(int32_t vgId) { char temp[TSDB_FILENAME_LEN * 3]; char rootDir[TSDB_FILENAME_LEN * 2]; + char walRootDir[TSDB_FILENAME_LEN * 2] = {0}; snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); @@ -321,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) { } } - sprintf(temp, "%s/wal", rootDir); + // walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal) + vnodeFindWalRootDir(pVnode->vgId, walRootDir); + if (walRootDir[0] == 0) { + int level = -1, id = -1; + + tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id); + if (level < 0 || id < 0) { + vnodeCleanUp(pVnode); + return terrno; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId); + } + + sprintf(temp, "%s/wal", walRootDir); pVnode->walCfg.vgId = pVnode->vgId; pVnode->wal = walOpen(temp, &pVnode->walCfg); if (pVnode->wal == NULL) { @@ -353,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) { pVnode->events = NULL; - vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); + vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode); vnodeAddIntoHash(pVnode); @@ -361,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; - tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); + tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN); syncInfo.getWalInfoFp = vnodeGetWalInfo; syncInfo.writeToCacheFp = vnodeWriteToCache; syncInfo.confirmForward = vnodeConfirmForard; diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 2448fada502177a0bcb4cf3deb194cce68e7854e..b28bdbf130aaef8b3e3ffc8cda0653e46d0efc29 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -183,7 +183,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, uint64_t qId, v } } else { *freeHandle = 
true; - vTrace("QInfo:%"PRIu64"-%p exec completed, free handle:%d", qId, *handle, *freeHandle); + vTrace("QInfo:0x%"PRIx64"-%p exec completed, free handle:%d", qId, *handle, *freeHandle); } } else { SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -232,7 +232,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; - pRsp->qhandle = 0; + pRsp->qId = 0; pRet->len = sizeof(SQueryTableRsp); pRet->rsp = pRsp; @@ -244,18 +244,18 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { if (handle == NULL) { // failed to register qhandle pRsp->code = terrno; terrno = 0; - vError("vgId:%d, QInfo:%"PRIu64 "-%p register qhandle failed, return to app, code:%s", pVnode->vgId, qId, (void *)pQInfo, + vError("vgId:%d, QInfo:0x%"PRIx64 "-%p register qhandle failed, return to app, code:%s", pVnode->vgId, qId, (void *)pQInfo, tstrerror(pRsp->code)); qDestroyQueryInfo(pQInfo); // destroy it directly return pRsp->code; } else { assert(*handle == pQInfo); - pRsp->qhandle = htobe64(qId); + pRsp->qId = htobe64(qId); } if (handle != NULL && vnodeNotifyCurrentQhandle(pRead->rpcHandle, qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { - vError("vgId:%d, QInfo:%"PRIu64 "-%p, query discarded since link is broken, %p", pVnode->vgId, qId, *handle, + vError("vgId:%d, QInfo:0x%"PRIx64 "-%p, query discarded since link is broken, %p", pVnode->vgId, qId, *handle, pRead->rpcHandle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); @@ -266,7 +266,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } if (handle != NULL) { - vTrace("vgId:%d, QInfo:%"PRIu64 "-%p, dnode query msg disposed, create qhandle and returns to app", vgId, qId, *handle); + vTrace("vgId:%d, QInfo:0x%"PRIx64 "-%p, dnode query msg disposed, create qhandle and returns to app", vgId, qId, *handle); code = vnodePutItemIntoReadQueue(pVnode, handle, pRead->rpcHandle); if (code != TSDB_CODE_SUCCESS) { pRsp->code = code; @@ -324,14 +324,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { - void * pCont = pRead->pCont; - SRspRet *pRet = &pRead->rspRet; + void *pCont = pRead->pCont; + SRspRet *pRet = &pRead->rspRet; SRetrieveTableMsg *pRetrieve = pCont; pRetrieve->free = htons(pRetrieve->free); pRetrieve->qId = htobe64(pRetrieve->qId); - vTrace("vgId:%d, qId:%" PRIu64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qId, + vTrace("vgId:%d, qId:0x%" PRIx64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qId, pRetrieve->free, pRead->rpcHandle); memset(pRet, 0, sizeof(SRspRet)); @@ -383,7 +383,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); freeHandle = true; } else { // result is not ready, return immediately - // Only effects in the non-blocking model + // Only affects the non-blocking model if (!tsRetrieveBlockingModel) { if (!buildRes) { assert(pRead->rpcHandle != NULL); @@ -414,7 +414,7 @@ int32_t vnodeNotifyCurrentQhandle(void *handle, uint64_t qId, void *qhandle, int pMsg->header.vgId = htonl(vgId); pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); - vTrace("QInfo:%"PRIu64"-%p register qhandle to connect:%p", qId, qhandle, handle); + vTrace("QInfo:0x%"PRIx64"-%p 
register qhandle to connect:%p", qId, qhandle, handle); return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg)); } diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index aab685e67843fe63b9c3860254988a9ec5ec8d98..def9cf3b32121206f270fdbe9c131ddad668a7e5 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -347,9 +347,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite, pWrite->processedCount); pWrite->processedCount = 0; + void *handle = pWrite->rpcMsg.handle; code = vnodeWriteToWQueueImp(pWrite); - if (code != 0) { - dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code); + if (code != TSDB_CODE_SUCCESS) { + SRpcMsg rpcRsp = {.handle = handle, .code = code}; + rpcSendResponse(&rpcRsp); } } } diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 39ce2657aa2c27c2249d1492f5dbcde2517cc988..55ab9b031beaa35da49523af21c581a5aae47e3b 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) { pWal->level = pCfg->walLevel; pWal->fsyncPeriod = pCfg->fsyncPeriod; - pWal->fsyncSeq = pCfg->fsyncPeriod % 1000; + pWal->fsyncSeq = pCfg->fsyncPeriod / 1000; if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1; return TSDB_CODE_SUCCESS; diff --git a/tests/comparisonTest/cassandra/cassandratest/pom.xml b/tests/comparisonTest/cassandra/cassandratest/pom.xml index 01667ff53a87ba0e2598d94c52c3523e7d5aea5d..8eeb5c3aa092ba360256a0e02ccdd9cead113b95 100644 --- a/tests/comparisonTest/cassandra/cassandratest/pom.xml +++ b/tests/comparisonTest/cassandra/cassandratest/pom.xml @@ -100,7 +100,7 @@ commons-io commons-io - 2.4 + 2.7 diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml index ae288ae294a8569d10c5c98b3e82e2833ef4ed68..0af7491128cf21fc3a35d32701c0aec1aeff88cd 100644 --- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml +++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml @@ -198,7 +198,7 @@ commons-io commons-io - 2.4 + 2.7 diff --git a/tests/examples/C#/C#checker/C#checker.cs b/tests/examples/C#/C#checker/C#checker.cs index 24b7060b14862e220b9b08a362e27cd65ae4eb7d..80fa3b838661ecbc80d727612166c5396df279b3 100644 --- a/tests/examples/C#/C#checker/C#checker.cs +++ b/tests/examples/C#/C#checker/C#checker.cs @@ -20,358 +20,367 @@ using System.Runtime.InteropServices; using System.Collections; namespace TDengineDriver -{ - class TDengineTest - { - //connect parameters - private string host; - private string configDir; - private string user; - private string password; - private short port = 0; - - //sql parameters - private string dbName; - private string tbName; - - - private bool isInsertData; - private bool isQueryData; - - private long tableCount; - private long totalRows; - private long batchRows; - private long beginTimestamp = 1551369600000L; - - private IntPtr conn = IntPtr.Zero; - private long rowsInserted = 0; - - static void Main(string[] args) - { - TDengineTest tester = new TDengineTest(); - tester.ReadArgument(args); - - - tester.InitTDengine(); - tester.ConnectTDengine(); +{ + class TDengineTest + { + //connect parameters + private string host; + private string configDir; + private string user; + private string password; + private short port = 0; + + //sql parameters + private string dbName; + private string tbName; + + + private bool isInsertData; + private bool isQueryData; + + private long 
tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineTest tester = new TDengineTest(); + tester.ReadArgument(args); + + + tester.InitTDengine(); + tester.ConnectTDengine(); tester.createDatabase(); - tester.useDatabase(); + tester.useDatabase(); tester.checkDropTable(); tester.createTable(); tester.checkInsert(); tester.checkSelect(); - tester.checkDropTable(); - - tester.CloseConnection(); - - - - } - - public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { - String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - - long tmpVal = Convert.ToInt64(tmp); - if (tmpVal < minVal || tmpVal > maxVal) - { - Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); - ExitProgram(); - } - - return tmpVal; - } - } - - return defaultValue; - } - - public String GetArgumentAsString(String[] argv, String argName, String defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { - String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - return tmp; - } - } - - return defaultValue; - } - - public void PrintHelp(String[] argv) - { - for (int i = 0; i < argv.Length; ++i) - { - if ("--help" == argv[i]) - { - String indent = " "; - Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); - Console.WriteLine("{0:G}{1:G}", indent, "-h"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); - Console.WriteLine("{0:G}{1:G}", indent, "-u"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); - Console.WriteLine("{0:G}{1:G}", indent, "-p"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); - Console.WriteLine("{0:G}{1:G}", indent, "-d"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); - Console.WriteLine("{0:G}{1:G}", indent, "-s"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); - Console.WriteLine("{0:G}{1:G}", indent, "-t"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); - Console.WriteLine("{0:G}{1:G}", indent, "-w"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); - Console.WriteLine("{0:G}{1:G}", indent, "-r"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); - Console.WriteLine("{0:G}{1:G}", indent, "-n"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-b"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-i"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default 
is 100"); - Console.WriteLine("{0:G}{1:G}", indent, "-c"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); - - ExitProgram(); - } - } - } - - public void ReadArgument(String[] argv) - { - PrintHelp(argv); - host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); - user = this.GetArgumentAsString(argv, "-u", "root"); - password = this.GetArgumentAsString(argv, "-p", "taosdata"); - dbName = this.GetArgumentAsString(argv, "-db", "test"); - tbName = this.GetArgumentAsString(argv, "-s", "weather"); - - isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; - isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; - tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); - batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000, 500); - totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); - configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); - } - - public void InitTDengine() - { - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); - TDengine.Init(); - Console.WriteLine("get connection starting..."); - } - - public void ConnectTDengine() - { - string db = ""; - this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); - if (this.conn == IntPtr.Zero) - { - Console.WriteLine("connection failed: " + this.host); - ExitProgram(); - } - else - { - Console.WriteLine("[ OK ] Connection established."); - } - } - public void createDatabase() - { - StringBuilder sql = new StringBuilder(); - sql.Append("create database if not exists ").Append(this.dbName); - execute(sql.ToString()); - } - public void useDatabase() - { - StringBuilder sql = new StringBuilder(); - sql.Append("use ").Append(this.dbName); - execute(sql.ToString()); - } - public void checkSelect() - { - StringBuilder sql = new StringBuilder(); - sql.Append("select * from test.weather"); - execute(sql.ToString()); - } - public void createTable() - { - StringBuilder sql = new StringBuilder(); - sql.Append("create table if not exists ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts timestamp, temperature float, humidity int)"); - execute(sql.ToString()); - } - public void checkInsert() - { - StringBuilder sql = new StringBuilder(); - sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"); - execute(sql.ToString()); - } - public void checkDropTable() - { - StringBuilder sql = new StringBuilder(); - sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append(""); - execute(sql.ToString()); - } - public void execute(string sql) - { - DateTime dt1 = DateTime.Now; - IntPtr res = TDengine.Query(this.conn, sql.ToString()); - DateTime dt2 = DateTime.Now; - TimeSpan span = dt2 - dt1; - - if (res != IntPtr.Zero) - { - Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); - } - else - { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); - } - TDengine.FreeResult(res); - } - - public void ExecuteQuery(string sql) - { - - DateTime dt1 = DateTime.Now; - long queryRows = 0; - IntPtr res = TDengine.Query(conn, sql); - if (res == IntPtr.Zero) - { - Console.WriteLine(sql + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); - } - DateTime dt2 = DateTime.Now; - TimeSpan span = dt2 - dt1; - Console.WriteLine("[OK] time cost: " + span.ToString() + 
"ms, execute statement ====> " + sql.ToString()); - int fieldCount = TDengine.FieldCount(res); - - List metas = TDengine.FetchFields(res); - for (int j = 0; j < metas.Count; j++) - { - TDengineMeta meta = (TDengineMeta)metas[j]; - } - - IntPtr rowdata; - StringBuilder builder = new StringBuilder(); - while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) - { - queryRows++; - for (int fields = 0; fields < fieldCount; ++fields) - { - TDengineMeta meta = metas[fields]; - int offset = IntPtr.Size * fields; - IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - - builder.Append("---"); - - if (data == IntPtr.Zero) - { - builder.Append("NULL"); - continue; - } - - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) == 0 ? false : true; - builder.Append(v1); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - byte v2 = Marshal.ReadByte(data); - builder.Append(v2); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - builder.Append(v3); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - builder.Append(v4); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - builder.Append(v5); - break; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - builder.Append(v6); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - builder.Append(v7); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - string v8 = Marshal.PtrToStringAnsi(data); - builder.Append(v8); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - builder.Append(v9); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - string v10 = Marshal.PtrToStringAnsi(data); - builder.Append(v10); - break; - } - } - builder.Append("---"); - - if (queryRows <= 10) - { - Console.WriteLine(builder.ToString()); - } - builder.Clear(); - } - - if (TDengine.ErrorNo(res) != 0) - { - Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); - } - - TDengine.FreeResult(res); - - } - - public void CloseConnection() - { - if (this.conn != IntPtr.Zero) - { - TDengine.Close(this.conn); - Console.WriteLine("connection closed."); - } - } - - static void ExitProgram() - { - TDengine.Cleanup(); - System.Environment.Exit(0); - } + tester.checkDropTable(); + + tester.CloseConnection(); + + + + } + + public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + + long tmpVal = Convert.ToInt64(tmp); + if (tmpVal < minVal || tmpVal > maxVal) + { + Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); + ExitProgram(); + } + + return tmpVal; + } + } + + return defaultValue; + } + + public String GetArgumentAsString(String[] argv, String argName, String defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", 
tmp); + ExitProgram(); + } + return tmp; + } + } + + return defaultValue; + } + + public void PrintHelp(String[] argv) + { + for (int i = 0; i < argv.Length; ++i) + { + if ("--help" == argv[i]) + { + String indent = " "; + Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); + Console.WriteLine("{0:G}{1:G}", indent, "-h"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); + Console.WriteLine("{0:G}{1:G}", indent, "-u"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); + Console.WriteLine("{0:G}{1:G}", indent, "-p"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); + Console.WriteLine("{0:G}{1:G}", indent, "-d"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); + Console.WriteLine("{0:G}{1:G}", indent, "-s"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); + Console.WriteLine("{0:G}{1:G}", indent, "-t"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); + Console.WriteLine("{0:G}{1:G}", indent, "-w"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); + Console.WriteLine("{0:G}{1:G}", indent, "-r"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); + Console.WriteLine("{0:G}{1:G}", indent, "-n"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-b"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-i"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); + Console.WriteLine("{0:G}{1:G}", indent, "-c"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); + + ExitProgram(); + } + } + } + + public void ReadArgument(String[] argv) + { + PrintHelp(argv); + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); + user = this.GetArgumentAsString(argv, "-u", "root"); + password = this.GetArgumentAsString(argv, "-p", "taosdata"); + dbName = this.GetArgumentAsString(argv, "-db", "test"); + tbName = this.GetArgumentAsString(argv, "-s", "weather"); + + isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; + isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; + tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); + batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000, 500); + totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); + configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + Console.WriteLine("get connection starting..."); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("connection failed: " + this.host); + ExitProgram(); + } + else + { + Console.WriteLine("[ OK ] Connection established."); + } + } + public 
void createDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("create database if not exists ").Append(this.dbName); + execute(sql.ToString()); + } + public void useDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("use ").Append(this.dbName); + execute(sql.ToString()); + } + public void checkSelect() + { + StringBuilder sql = new StringBuilder(); + sql.Append("select * from test.weather"); + execute(sql.ToString()); + } + public void createTable() + { + StringBuilder sql = new StringBuilder(); + sql.Append("create table if not exists ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts timestamp, temperature float, humidity int)"); + execute(sql.ToString()); + } + public void checkInsert() + { + StringBuilder sql = new StringBuilder(); + sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"); + execute(sql.ToString()); + } + public void checkDropTable() + { + StringBuilder sql = new StringBuilder(); + sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append(""); + execute(sql.ToString()); + } + public void execute(string sql) + { + DateTime dt1 = DateTime.Now; + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + + public void ExecuteQuery(string sql) + { + + DateTime dt1 = DateTime.Now; + long queryRows = 0; + IntPtr res = TDengine.Query(conn, sql); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + int fieldCount = TDengine.FieldCount(res); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + Console.WriteLine(""); + + TDengine.FreeResult(res); + + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + Console.WriteLine("connection closed."); + } + } + + static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } } } diff --git a/tests/examples/C#/TDengineDriver.cs b/tests/examples/C#/TDengineDriver.cs index b6f143e1813d60c1ac4ae8356efdca4929c51345..2c150341f62d16372a99d341a495771e4c2a3dbc 100644 --- a/tests/examples/C#/TDengineDriver.cs +++ b/tests/examples/C#/TDengineDriver.cs @@ -19,136 +19,149 @@ using System.Runtime.InteropServices; namespace TDengineDriver { - enum TDengineDataType { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() + enum TDengineDataType { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case 
TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes } - } - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } - [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOL"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "TINYINT"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } - [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; - [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); - [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); - [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); - [DllImport("taos.dll", EntryPoint 
= "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } - [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); - [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; - [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - } -} \ No newline at end of file + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = 
Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} diff --git a/tests/examples/C#/TDengineTest.cs b/tests/examples/C#/TDengineTest.cs index 6b3f1160adc08fd18a16a1292b08a8b798ce389d..f4ee62527feda4d43b21f37e9c513af2053e1f9d 100644 --- a/tests/examples/C#/TDengineTest.cs +++ b/tests/examples/C#/TDengineTest.cs @@ -165,7 +165,7 @@ namespace TDengineDriver public void ReadArgument(String[] argv) { PrintHelp(argv); - host = this.GetArgumentAsString(argv, "-h", "192.168.100.128"); + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); user = this.GetArgumentAsString(argv, "-u", "root"); password = this.GetArgumentAsString(argv, "-p", "taosdata"); dbName = this.GetArgumentAsString(argv, "-d", "db"); @@ -212,42 +212,54 @@ namespace TDengineDriver StringBuilder sql = new StringBuilder(); sql.Append("create database if not exists ").Append(this.dbName); IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql.ToString() + " success"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); + Console.WriteLine(sql.ToString() + " success"); } TDengine.FreeResult(res); sql.Clear(); sql.Append("use ").Append(this.dbName); res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql.ToString() + " success"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); + Console.WriteLine(sql.ToString() + " success"); } TDengine.FreeResult(res); sql.Clear(); sql.Append("create table if not exists ").Append(this.stableName).Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql.ToString() + " success"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); + Console.WriteLine(sql.ToString() + " success"); } TDengine.FreeResult(res); @@ -257,14 +269,18 @@ namespace TDengineDriver sql = sql.Append("create table if not exists ").Append(this.tablePrefix).Append(i) .Append(" using ").Append(this.stableName).Append(" tags(").Append(i).Append(")"); res = TDengine.Query(this.conn, sql.ToString()); - if (res != 
IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql.ToString() + " success"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - ExitProgram(); + Console.WriteLine(sql.ToString() + " success"); } TDengine.FreeResult(res); } @@ -298,9 +314,13 @@ namespace TDengineDriver .Append(", 5, 6, 7, 'abc', 'def')"); } IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res == IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); } int affectRows = TDengine.AffectRows(res); @@ -326,16 +346,20 @@ namespace TDengineDriver System.DateTime start = new System.DateTime(); long queryRows = 0; - + for (int i = 0; i < 1/*this.tableCount*/; ++i) { String sql = "select * from " + this.dbName + "." + tablePrefix + i; Console.WriteLine(sql); IntPtr res = TDengine.Query(conn, sql); - if (res == IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql + " failure, reason: " + TDengine.Error(res)); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); ExitProgram(); } @@ -410,6 +434,22 @@ namespace TDengineDriver string v10 = Marshal.PtrToStringAnsi(data); builder.Append(v10); break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v11 = Marshal.ReadByte(data); + builder.Append(v11); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v12 = (ushort)Marshal.ReadInt16(data); + builder.Append(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v13 = (uint)Marshal.ReadInt32(data); + builder.Append(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v14 = (ulong)Marshal.ReadInt64(data); + builder.Append(v14); + break; } } builder.Append("---"); @@ -423,8 +463,10 @@ namespace TDengineDriver if (TDengine.ErrorNo(res) != 0) { - Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + Console.Write("Query is not complete, Error {0:G}", + TDengine.ErrorNo(res), TDengine.Error(res)); } + Console.WriteLine(""); TDengine.FreeResult(res); } diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs index 205269501d376a4753b3aedbfa8d512b2df31600..2c150341f62d16372a99d341a495771e4c2a3dbc 100644 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ b/tests/examples/C#/taosdemo/TDengineDriver.cs @@ -31,7 +31,11 @@ namespace TDengineDriver TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes TSDB_DATA_TYPE_BINARY = 8, // string TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes } enum TDengineInitOption @@ -53,15 +57,23 @@ namespace TDengineDriver switch ((TDengineDataType)type) { case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; + return "BOOL"; case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - 
return "BYTE"; + return "TINYINT"; case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; + return "SMALLINT"; case TDengineDataType.TSDB_DATA_TYPE_INT: return "INT"; case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; case TDengineDataType.TSDB_DATA_TYPE_FLOAT: return "FLOAT"; case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index 2d78418e0ab8599f87a544b833b9b6bbd0b48ee7..e092c48f15314f5cad0a9509190d7b9970a7073a 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -63,7 +63,7 @@ namespace TDengineDriver static void HelpPrint(string arg, string desc) { string indent = " "; - Console.WriteLine("{0}{1}", indent, arg.PadRight(25)+desc); + Console.WriteLine("{0}{1}", indent, arg.PadRight(25) + desc); } static void PrintHelp(String[] argv) @@ -142,33 +142,33 @@ namespace TDengineDriver verbose = this.GetArgumentAsFlag(argv, "-v", true); debug = this.GetArgumentAsFlag(argv, "-g", true); - VerbosePrint ("###################################################################\n"); - VerbosePrintFormat ("# Server IP: {0}\n", host); - VerbosePrintFormat ("# User: {0}\n", user); - VerbosePrintFormat ("# Password: {0}\n", password); - VerbosePrintFormat ("# Number of Columns per record: {0}\n", colsPerRecord); - VerbosePrintFormat ("# Number of Threads: {0}\n", numOfThreads); - VerbosePrintFormat ("# Number of Tables: {0}\n", numOfTables); - VerbosePrintFormat ("# Number of records per Table: {0}\n", recordsPerTable); - VerbosePrintFormat ("# Records/Request: {0}\n", recordsPerRequest); - VerbosePrintFormat ("# Database name: {0}\n", dbName); - VerbosePrintFormat ("# Replica: {0}\n", replica); - VerbosePrintFormat ("# Use STable: {0}\n", useStable); - VerbosePrintFormat ("# Table prefix: {0}\n", tablePrefix); + VerbosePrint("###################################################################\n"); + VerbosePrintFormat("# Server IP: {0}\n", host); + VerbosePrintFormat("# User: {0}\n", user); + VerbosePrintFormat("# Password: {0}\n", password); + VerbosePrintFormat("# Number of Columns per record: {0}\n", colsPerRecord); + VerbosePrintFormat("# Number of Threads: {0}\n", numOfThreads); + VerbosePrintFormat("# Number of Tables: {0}\n", numOfTables); + VerbosePrintFormat("# Number of records per Table: {0}\n", recordsPerTable); + VerbosePrintFormat("# Records/Request: {0}\n", recordsPerRequest); + VerbosePrintFormat("# Database name: {0}\n", dbName); + VerbosePrintFormat("# Replica: {0}\n", replica); + VerbosePrintFormat("# Use STable: {0}\n", useStable); + VerbosePrintFormat("# Table prefix: {0}\n", tablePrefix); if (useStable == true) { VerbosePrintFormat("# STable prefix: {0}\n", stablePrefix); } - VerbosePrintFormat ("# Data order: {0}\n", order); - VerbosePrintFormat ("# Data out of order rate: {0}\n", rateOfOutorder); - VerbosePrintFormat ("# Delete method: {0}\n", methodOfDelete); - VerbosePrintFormat ("# Query command: {0}\n", query); - VerbosePrintFormat ("# Query Mode: {0}\n", queryMode); - VerbosePrintFormat ("# Insert Only: {0}\n", isInsertOnly); - VerbosePrintFormat ("# Verbose output {0}\n", verbose); - VerbosePrintFormat ("# 
Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt")); + VerbosePrintFormat("# Data order: {0}\n", order); + VerbosePrintFormat("# Data out of order rate: {0}\n", rateOfOutorder); + VerbosePrintFormat("# Delete method: {0}\n", methodOfDelete); + VerbosePrintFormat("# Query command: {0}\n", query); + VerbosePrintFormat("# Query Mode: {0}\n", queryMode); + VerbosePrintFormat("# Insert Only: {0}\n", isInsertOnly); + VerbosePrintFormat("# Verbose output {0}\n", verbose); + VerbosePrintFormat("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt")); - VerbosePrint ("###################################################################\n"); + VerbosePrint("###################################################################\n"); if (skipReadKey == false) { @@ -370,31 +370,38 @@ namespace TDengineDriver StringBuilder sql = new StringBuilder(); sql.Append("DROP DATABASE IF EXISTS ").Append(this.dbName); IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - VerbosePrint(sql.ToString() + " success\n"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + CleanAndExitProgram(1); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - CleanAndExitProgram(1); + VerbosePrint(sql.ToString() + " success\n"); } - } public void CreateDb() { StringBuilder sql = new StringBuilder(); - sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica); + sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica).Append(" keep 36500"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - VerbosePrint(sql.ToString() + " success\n"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + CleanAndExitProgram(1); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - CleanAndExitProgram(1); + VerbosePrint(sql.ToString() + " success\n"); } TDengine.FreeResult(res); } @@ -406,16 +413,20 @@ namespace TDengineDriver sql.Clear(); sql.Append("CREATE TABLE IF NOT EXISTS "). Append(this.dbName).Append(".").Append(this.stablePrefix). 
- Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); + Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned) tags(t1 int)"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - VerbosePrint(sql.ToString() + " success\n"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + CleanAndExitProgram(1); } else { - Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); - CleanAndExitProgram(1); + VerbosePrint(sql.ToString() + " success\n"); } TDengine.FreeResult(res); } @@ -495,9 +506,13 @@ namespace TDengineDriver IntPtr res = TDengine.Query(conn, sql); DebugPrintFormat("res: {0}\n", res); - if (res == IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - Console.WriteLine(sql + " failure, reason: " + TDengine.Error(res)); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); CleanAndExitProgram(1); } @@ -523,7 +538,7 @@ namespace TDengineDriver int offset = IntPtr.Size * fields; IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - builder.Append("---"); + builder.Append(" | "); if (data == IntPtr.Zero) { @@ -538,7 +553,7 @@ namespace TDengineDriver builder.Append(v1); break; case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - byte v2 = Marshal.ReadByte(data); + sbyte v2 = (sbyte)Marshal.ReadByte(data); builder.Append(v2); break; case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: @@ -573,9 +588,25 @@ namespace TDengineDriver string v10 = Marshal.PtrToStringAnsi(data); builder.Append(v10); break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v11 = Marshal.ReadByte(data); + builder.Append(v11); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v12 = (ushort)Marshal.ReadInt16(data); + builder.Append(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v13 = (uint)Marshal.ReadInt32(data); + builder.Append(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v14 = (ulong)Marshal.ReadInt64(data); + builder.Append(v14); + break; } } - builder.Append("---"); + builder.Append(" | "); VerbosePrint(builder.ToString() + "\n"); builder.Clear(); @@ -642,8 +673,8 @@ namespace TDengineDriver tester.ExecuteQuery(); watch.Stop(); elapsedMs = watch.Elapsed.TotalMilliseconds; - Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n", - elapsedMs/1000, + Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n", + elapsedMs / 1000, tester.recordsPerTable * tester.numOfTables ); } @@ -712,9 +743,9 @@ namespace TDengineDriver int m = now.Minute; int s = now.Second; - long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0 + long baseTimestamp = -16094340000; // 1969-06-29 01:21:00 VerbosePrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s); - long beginTimestamp = baseTimestamp + ((h*60 + m) * 60 + s) * 1000; + long beginTimestamp = baseTimestamp + ((h * 60 + m) * 60 + s) * 1000; Random random = new Random(); long rowsInserted = 0; @@ -755,15 +786,24 @@ namespace TDengineDriver sql.Append("(") .Append(writeTimeStamp) - 
.Append(", 1, 2, 3,") - .Append(i + batch) - .Append(", 5, 6, 7, 'abc', 'def')"); + .Append(", 1, -2, -3,") + .Append(i + batch - 127) + .Append(", -5, -6, -7, 'abc', 'def', 254, 65534,") + .Append(4294967294 - (uint)i - (uint)batch) + .Append(",") + .Append(18446744073709551614 - (ulong)i - (ulong)batch) + .Append(")"); } + VerbosePrint(sql.ToString() + "\n"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res == IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - VerbosePrint(sql.ToString() + " failure, reason: " + TDengine.Error(res) + "\n"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); } inserted += this.batchRows; @@ -837,17 +877,21 @@ namespace TDengineDriver } else { - sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))"); + sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned)"); } IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if (res != IntPtr.Zero) + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) { - VerbosePrint(sql.ToString() + " success\n"); + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + CleanAndExitProgram(1); } else { - VerbosePrint(sql.ToString() + " failure, reason: " + TDengine.Error(res) + "\n"); - CleanAndExitProgram(1); + VerbosePrint(sql.ToString() + " success\n"); } TDengine.FreeResult(res); } diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index bd5f7efbc0321962a3f3f41a17bc9bde5dee9b27..6c83718896cc2e5716f599ba08212d3dc8292133 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,9 +63,9 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.20 + 2.0.28 - + diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java index 8f30c299466ca1f83bc689928b08e8001a87908d..fa10f3b0929e4c25c1379f489f73fc12ad9c1917 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java @@ -7,6 +7,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication; @MapperScan(basePackages = {"com.taosdata.example.springbootdemo.dao"}) @SpringBootApplication public class SpringbootdemoApplication { + public static void main(String[] args) { SpringApplication.run(SpringbootdemoApplication.class, args); } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index c153e27701403b672709a3585603b8630796d178..cf14f5d84ace6348f38709ac3d3668ee8d2a0797 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -45,7 +45,7 @@ public class WeatherController { * @return */ @PostMapping("/{temperature}/{humidity}") - public int saveWeather(@PathVariable float temperature, @PathVariable int humidity) { + public int saveWeather(@PathVariable float temperature, @PathVariable float humidity) { return weatherService.save(temperature, humidity); } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java index 255b2cdca920a5568338e823d0744f2e9c4db7d3..c11b9a6f50655788d1e35eb9607a101d2d06c872 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java @@ -8,8 +8,8 @@ public class Weather { @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") private Timestamp ts; - private float temperature; - private float humidity; + private Float temperature; + private Float humidity; private String location; private int groupId; @@ -30,19 +30,19 @@ public class Weather { this.ts = ts; } - public float getTemperature() { + public Float getTemperature() { return temperature; } - public void setTemperature(float temperature) { + public void setTemperature(Float temperature) { this.temperature = temperature; } - public float getHumidity() { + public Float getHumidity() { return humidity; } - public void setHumidity(float humidity) { + public void setHumidity(Float humidity) { this.humidity = humidity; } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java index 0aef828e1cf19f9c612f9c8d0433ce1a361c7441..26d09c7d128015739cdb0a87956affa4910b4b4e 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java @@ -39,7 +39,7 @@ public class WeatherService { return weatherMapper.select(limit, offset); } - public int save(float temperature, int humidity) { + public int save(float temperature, float humidity) { Weather weather = new Weather(); weather.setTemperature(temperature); weather.setHumidity(humidity); diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 930a6075cae2767c0d50ca2e1574d6441430d1b3..f20c0321c455da9c430aa3a4eb1d32af7d71da8b 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -435,11 +435,15 @@ void verify_async(TAOS* taos) { } void stream_callback(void *param, TAOS_RES *res, TAOS_ROW row) { + if (res == NULL || row == NULL) { + return; + } + int num_fields = taos_num_fields(res); TAOS_FIELD* fields = taos_fetch_fields(res); printf("got one row from stream_callback\n"); - char temp[256]; + char temp[256] = {0}; taos_print_row(temp, row, fields, num_fields); puts(temp); } diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index e074e6496607bf2046496c7b6766b6f3dedf9dbc..0b1cd7b5d2d304a89656c98e88b2a620e16ae5a1 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) { 
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } - for (int i = 0; i < 4000000; i++) { + for (int i = 0; i < 100; i++) { Test(taos, qstr, i); } taos_close(taos); diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go index 003f5aeddccd7f988b62227815a98cde8610f311..543cfcc0f65aad154cc411891a76ae2fdb4e0e02 100644 --- a/tests/examples/go/taosdemo.go +++ b/tests/examples/go/taosdemo.go @@ -18,6 +18,7 @@ import ( "database/sql" "flag" "fmt" + "log" "math/rand" "os" "runtime" @@ -26,8 +27,6 @@ import ( "time" _ "github.com/taosdata/driver-go/taosSql" - - //"golang.org/x/sys/unix" ) const ( @@ -48,6 +47,7 @@ type config struct { dbName string supTblName string tablePrefix string + mode string numOftables int numOfRecordsPerTable int numOfRecordsPerReq int @@ -70,6 +70,7 @@ func init() { flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") flag.StringVar(&configPara.dbName, "d", "test", "Destination database.") flag.StringVar(&configPara.tablePrefix, "m", "d", "Table prefix name.") + flag.StringVar(&configPara.mode, "M", "r", "mode,r:raw,s:stmt") flag.IntVar(&configPara.numOftables, "t", 2, "The number of tables.") flag.IntVar(&configPara.numOfRecordsPerTable, "n", 10, "The number of records per table.") flag.IntVar(&configPara.numOfRecordsPerReq, "r", 3, "The number of records per request.") @@ -94,6 +95,7 @@ func printAllArgs() { fmt.Printf("usr: %v\n", configPara.user) fmt.Printf("password: %v\n", configPara.password) fmt.Printf("dbName: %v\n", configPara.dbName) + fmt.Printf("mode: %v\n", configPara.mode) fmt.Printf("tablePrefix: %v\n", configPara.tablePrefix) fmt.Printf("numOftables: %v\n", configPara.numOftables) fmt.Printf("numOfRecordsPerTable: %v\n", configPara.numOfRecordsPerTable) @@ -119,6 +121,24 @@ func main() { //defer db.Close() rand.Seed(time.Now().Unix()) + if configPara.mode == "s" { + fmt.Printf("\n======== start stmt mode test ========\n") + db, err := sql.Open("taosSql", url) + if err != nil { + log.Fatalf("Open database error: %s\n", err) + } + defer db.Close() + demodbStmt := configPara.dbName + demotStmt := "demotStmt" + drop_database_stmt(db, demodbStmt) + create_database_stmt(db, demodbStmt) + use_database_stmt(db, demodbStmt) + create_table_stmt(db, demotStmt) + insert_data_stmt(db, demotStmt) + select_data_stmt(db, demotStmt) + return + } + createDatabase(configPara.dbName, configPara.supTblName) fmt.Printf("======== create database success! 
========\n\n") @@ -407,6 +427,132 @@ func selectTest(dbName string, tbPrefix string, supTblName string) { checkErr(err, "rows next iteration error") } } +func drop_database_stmt(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + // drop test db + res, err := db.Exec("drop database if exists " + demodb) + checkErr(err, "drop database "+demodb) + + affectd, err := res.RowsAffected() + checkErr(err, "drop db, res.RowsAffected") + + et := time.Now().Nanosecond() + fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} + +func create_database_stmt(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + // create database + //var stmt interface{} + stmt, err := db.Prepare("create database ?") + checkErr(err, "create db, db.Prepare") + + //var res driver.Result + res, err := stmt.Exec(demodb) + checkErr(err, "create db, stmt.Exec") + + //fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected()) + affectd, err := res.RowsAffected() + checkErr(err, "create db, res.RowsAffected") + + et := time.Now().Nanosecond() + fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} + +func use_database_stmt(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + // create database + //var stmt interface{} + stmt, err := db.Prepare("use " + demodb) + checkErr(err, "use db, db.Prepare") + + res, err := stmt.Exec() + checkErr(err, "use db, stmt.Exec") + + affectd, err := res.RowsAffected() + checkErr(err, "use db, res.RowsAffected") + + et := time.Now().Nanosecond() + fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} + +func create_table_stmt(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + // create table + // (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double) + stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(10), ? tinyint, ? bool, ? binary(8), ? float, ? double)") + checkErr(err, "create table db.Prepare") + + res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv") + checkErr(err, "create table stmt.Exec") + + affectd, err := res.RowsAffected() + checkErr(err, "create table res.RowsAffected") + + et := time.Now().Nanosecond() + fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} + +func insert_data_stmt(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + // insert data into table + stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?)") + checkErr(err, "insert db.Prepare") + + res, err := stmt.Exec(demot, "now", 1000, "'haidian'", 6, true, "'AI world'", 6987.654, 321.987, + "now+1s", 1001, "'changyang'", 7, false, "'DeepMode'", 12356.456, 128634.456, + "now+2s", 1002, "'chuangping'", 8, true, "'database'", 3879.456, 65433478.456) + checkErr(err, "insert data, stmt.Exec") + + affectd, err := res.RowsAffected() + checkErr(err, "res.RowsAffected") + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} + +func select_data_stmt(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + + stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? 
from ?") // go binary mode + checkErr(err, "db.Prepare") + + rows, err := stmt.Query("ts", "id", "name", "len", "flag", "notes", "fv", "dv", demot) + checkErr(err, "stmt.Query") + + fmt.Printf("%10s%s%8s %5s %8s%s %s %10s%s %7s%s %8s%s %11s%s %14s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv") + var affectd int + for rows.Next() { + var ts string + var name string + var id int + var len int8 + var flag bool + var notes string + var fv float32 + var dv float64 + + err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) + //fmt.Println("start scan fields from row.rs, &fv:", &fv) + //err = rows.Scan(&fv) + checkErr(err, "rows.Scan") + + fmt.Printf("%s\t", ts) + fmt.Printf("%d\t", id) + fmt.Printf("%10s\t", name) + fmt.Printf("%d\t", len) + fmt.Printf("%t\t", flag) + fmt.Printf("%s\t", notes) + fmt.Printf("%06.3f\t", fv) + fmt.Printf("%09.6f\n", dv) + + affectd++ + + } + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9) +} func checkErr(err error, prompt string) { if err != nil { fmt.Printf("%s\n", prompt) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 8b9fa1a546eedc955b4b902a20731a4736d8f60d..9a160846838c76a4bc1f9f6f0b2d08a0f62fc1da 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -40,8 +40,8 @@ function buildTDengine { git remote update > /dev/null git reset --hard HEAD - git checkout develop - REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` + git checkout master + REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` LOCAL_COMMIT=`git rev-parse --short @` echo " LOCAL: $LOCAL_COMMIT" @@ -64,7 +64,7 @@ function runQueryPerfTest { [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 & echoInfo "Wait TDengine to start" - sleep 120 + sleep 300 echoInfo "Run Performance Test" cd $WORK_DIR/TDengine/tests/pytest @@ -73,6 +73,9 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT + + python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT + } diff --git a/tests/perftest-scripts/perftest-taosdemo-compare.sh b/tests/perftest-scripts/perftest-taosdemo-compare.sh new file mode 100755 index 0000000000000000000000000000000000000000..60b6d1310dbb023b81bad87efae273637b5f4dc0 --- /dev/null +++ b/tests/perftest-scripts/perftest-taosdemo-compare.sh @@ -0,0 +1,147 @@ +#!/bin/bash + +WORK_DIR=/home/ubuntu/pxiao +TDENGINE_DIR=/home/ubuntu/pxiao/TDengine +NUM_OF_VERSIONS=5 +CURRENT_VERSION=0 +today=`date +"%Y%m%d"` +TAOSDEMO_COMPARE_TEST_REPORT=$TDENGINE_DIR/tests/taosdemo-compare-test-report-$today.log + +# Coloured Echoes +function red_echo { echo -e "\033[31m$@\033[0m"; } +function green_echo { echo -e "\033[32m$@\033[0m"; } +function yellow_echo { echo -e "\033[33m$@\033[0m"; } +function white_echo { echo -e "\033[1;37m$@\033[0m"; } +# Coloured Printfs +function red_printf { printf "\033[31m$@\033[0m"; } +function green_printf { printf "\033[32m$@\033[0m"; } +function yellow_printf { printf "\033[33m$@\033[0m"; } +function white_printf { printf "\033[1;37m$@\033[0m"; } +# Debugging Outputs +function white_brackets { local args="$@"; white_printf "["; printf "${args}"; 
white_printf "]"; } +function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } +function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } +function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } + +function getCurrentVersion { + echoInfo "Build TDengine" + cd $WORK_DIR/TDengine + + git remote update > /dev/null + git reset --hard HEAD + git checkout master + REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` + LOCAL_COMMIT=`git rev-parse --short @` + + echo " LOCAL: $LOCAL_COMMIT" + echo "REMOTE: $REMOTE_COMMIT" + if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then + echo "repo up-to-date" + else + echo "repo need to pull" + git pull > /dev/null 2>&1 + fi + cd debug + rm -rf * + cmake .. > /dev/null 2>&1 + make > /dev/null 2>&1 + make install > /dev/null 2>&1 + + rm -rf $WORK_DIR/taosdemo + cp -r $TDENGINE_DIR/src/kit/taosdemo $WORK_DIR + CURRENT_VERSION=`taosd -V | grep version | awk '{print $3}' | awk -F. '{print $3}'` +} + +function buildTDengineByVersion() { + echoInfo "build TDengine on branch: $1" + git reset --hard HEAD + git checkout $1 + git pull > /dev/null + + rm -rf $TDENGINE_DIR/src/kit/taosdemo + cp -r $WORK_DIR/taosdemo $TDENGINE_DIR/src/kit + + cd $TDENGINE_DIR/debug + rm -rf * + cmake .. > /dev/null 2>&1 + make > /dev/null 2>&1 + make install > /dev/null 2>&1 +} + +function stopTaosd { + echo "Stop taosd" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + +function startTaosd { + echo "Start taosd" + rm -rf /var/lib/perf/* + rm -rf /var/log/perf/* + nohup taosd -c /etc/perf/ > /dev/null 2>&1 & + sleep 10 +} + +function runTaosdemoCompare { + echoInfo "Stop Taosd" + stopTaosd + + getCurrentVersion + release="master" + + [ -f $TAOSDEMO_COMPARE_TEST_REPORT ] && rm $TAOSDEMO_COMPARE_TEST_REPORT + + for((i=0;i<$NUM_OF_VERSIONS;i++)) + do + startTaosd + taos -s "drop database if exists demodb;" + taosdemo -y -d demodb > taosdemoperf.txt + + echo "==================== taosdemo performance for $release ====================" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + CREATE_TABLE_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` + INSERT_RECORDS_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` + RECORDS_PER_SECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'` + AVG_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $4}' | awk -Fm '{print $1}'` + MAX_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $6}' | awk -Fm '{print $1}'` + MIN_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $8}' | awk -Fm '{print $1}'` + + echo "create table time: $CREATE_TABLE_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "insert records time: $INSERT_RECORDS_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "records per second: $RECORDS_PER_SECOND records/second" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "avg delay: $AVG_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "max delay: $MAX_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "min delay: $MIN_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + + [ -f taosdemoperf.txt ] && rm taosdemoperf.txt + + stopTaosd + version=`expr $CURRENT_VERSION - $i` + release="release/s1$version" + buildTDengineByVersion $release + done +} + +function sendReport { + echo 
"send report" + receiver="develop@taosdata.com" + mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n" + + cd $TDENGINE_DIR + + sed -i 's/\x1b\[[0-9;]*m//g' $TAOSDEMO_COMPARE_TEST_REPORT + BODY_CONTENT=`cat $TAOSDEMO_COMPARE_TEST_REPORT` + echo -e "to: ${receiver}\nsubject: taosdemo performance compare test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + (cat - && uuencode $TAOSDEMO_COMPARE_TEST_REPORT taosdemo-compare-test-report-$today.log) | \ + ssmtp "${receiver}" && echo "Report Sent!" +} + +runTaosdemoCompare +sendReport + +echoInfo "End of Taosdemo Compare Test" | tee -a $WORK_DIR/cron.log \ No newline at end of file diff --git a/tests/pytest/alter/alterTabAddTagWithNULL.py b/tests/pytest/alter/alterTabAddTagWithNULL.py new file mode 100644 index 0000000000000000000000000000000000000000..52bdc0fe75b6edc1d75e8dacd468ea4493c88271 --- /dev/null +++ b/tests/pytest/alter/alterTabAddTagWithNULL.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute("create table stbtag (ts timestamp, c1 int) TAGS(t1 int)") + tdSql.execute("create table tag1 using stbtag tags(1)") + + tdLog.printNoPrefix("==========step2:alter stb add tag create new chiltable") + tdSql.execute("alter table stbtag add tag t2 int") + tdSql.execute("alter table stbtag add tag t3 tinyint") + tdSql.execute("alter table stbtag add tag t4 smallint ") + tdSql.execute("alter table stbtag add tag t5 bigint") + tdSql.execute("alter table stbtag add tag t6 float ") + tdSql.execute("alter table stbtag add tag t7 double ") + tdSql.execute("alter table stbtag add tag t8 bool ") + tdSql.execute("alter table stbtag add tag t9 binary(10) ") + tdSql.execute("alter table stbtag add tag t10 nchar(10)") + + tdSql.execute("create table tag2 using stbtag tags(2, 22, 23, 24, 25, 26.1, 27.1, 1, 'binary9', 'nchar10')") + tdSql.query( "select tbname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10 from stbtag" ) + tdSql.checkData(1, 0, "tag2") + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 22) + tdSql.checkData(1, 3, 23) + tdSql.checkData(1, 4, 24) + tdSql.checkData(1, 5, 25) + tdSql.checkData(1, 6, 26.1) + tdSql.checkData(1, 7, 27.1) + tdSql.checkData(1, 8, 1) + tdSql.checkData(1, 9, "binary9") + tdSql.checkData(1, 10, "nchar10") + + tdLog.printNoPrefix("==========step3:alter stb drop tag create new chiltable") + tdSql.execute("alter table stbtag drop tag t2 ") + tdSql.execute("alter table stbtag drop tag t3 ") + tdSql.execute("alter table stbtag drop tag t4 ") + tdSql.execute("alter table stbtag drop tag t5 ") + tdSql.execute("alter table stbtag drop tag t6 ") 
+ tdSql.execute("alter table stbtag drop tag t7 ") + tdSql.execute("alter table stbtag drop tag t8 ") + tdSql.execute("alter table stbtag drop tag t9 ") + tdSql.execute("alter table stbtag drop tag t10 ") + + tdSql.execute("create table tag3 using stbtag tags(3)") + tdSql.query("select * from stbtag where tbname like 'tag3' ") + tdSql.checkCols(3) + tdSql.query("select tbname, t1 from stbtag where tbname like 'tag3' ") + tdSql.checkData(0, 1, 3) + + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/alter/alterTimestampColDataProcess.py b/tests/pytest/alter/alterTimestampColDataProcess.py new file mode 100644 index 0000000000000000000000000000000000000000..b235a8bf4c0c7166b66f87642d477ba78bccc44d --- /dev/null +++ b/tests/pytest/alter/alterTimestampColDataProcess.py @@ -0,0 +1,73 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + ts1 = 0 + ts2 = -28800000 + ts3 = -946800000000 + ts4 = "1950-01-01 00:00:00" + tdSql.execute( + "create table stb2ts (ts timestamp, ts1 timestamp, ts2 timestamp, c1 int, ts3 timestamp) TAGS(t1 int)" + ) + tdSql.execute("create table t2ts1 using stb2ts tags(1)") + + tdSql.execute(f"insert into t2ts1 values ({ts1}, {ts1}, {ts1}, 1, {ts1})") + tdSql.execute(f"insert into t2ts1 values ({ts2}, {ts2}, {ts2}, 2, {ts2})") + tdSql.execute(f"insert into t2ts1 values ({ts3}, {ts3}, {ts3}, 4, {ts3})") + tdSql.execute(f"insert into t2ts1 values ('{ts4}', '{ts4}', '{ts4}', 3, '{ts4}')") + + tdLog.printNoPrefix("==========step2:check inserted data") + tdSql.query("select * from stb2ts where ts1=0 and ts2='1970-01-01 08:00:00' ") + tdSql.checkRows(1) + tdSql.checkData(0, 4,'1970-01-01 08:00:00') + + tdSql.query("select * from stb2ts where ts1=-28800000 and ts2='1970-01-01 00:00:00' ") + tdSql.checkRows(1) + tdSql.checkData(0, 4, '1970-01-01 00:00:00') + + tdSql.query("select * from stb2ts where ts1=-946800000000 and ts2='1940-01-01 00:00:00' ") + tdSql.checkRows(1) + tdSql.checkData(0, 4, '1940-01-01 00:00:00') + + tdSql.query("select * from stb2ts where ts1=-631180800000 and ts2='1950-01-01 00:00:00' ") + tdSql.checkRows(1) + tdSql.checkData(0, 4, '1950-01-01 00:00:00') + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) 
+tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/client/thousandsofClient.py b/tests/pytest/client/thousandsofClient.py new file mode 100644 index 0000000000000000000000000000000000000000..36c816aa5bd9ee9ed788b77c1f881f5e76adace5 --- /dev/null +++ b/tests/pytest/client/thousandsofClient.py @@ -0,0 +1,55 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading + + +class TwoClients: + def initConnection(self): + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/home/chr/taosdata/TDengine/sim/dnode1/cfg " + + def newCloseCon(times): + newConList = [] + for times in range(0,times) : + newConList.append(taos.connect(self.host, self.user, self.password, self.config)) + for times in range(0,times) : + newConList[times].close() + + def run(self): + tdDnodes.init("") + tdDnodes.setTestCluster(False) + tdDnodes.setValgrind(False) + + tdDnodes.stopAll() + tdDnodes.deploy(1) + tdDnodes.start(1) + + # multiple new and cloes connection + for m in range(1,101) : + t= threading.Thread(target=newCloseCon,args=(10,)) + t.start() + + +clients = TwoClients() +clients.initConnection() +clients.run() \ No newline at end of file diff --git a/tests/pytest/cluster/TD-3693/how-to-use b/tests/pytest/cluster/TD-3693/how-to-use new file mode 100644 index 0000000000000000000000000000000000000000..05a16a8534b07849f933dc7d6c33ea6306854ba0 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/how-to-use @@ -0,0 +1,9 @@ +execute: +cd TDengine/tests/pytest && python3 ./test.py -f cluster/TD-3693/multClient.py && python3 cluster/TD-3693/multQuery.py + +1. 使用测试的集群,三个节点fc1、fct2、fct4。 +2. 用taosdemo建两个库db1和db2,副本数目为1,插入一定数据。 +3. db1在mnode的master上(fct2),db2在mnode的slave上(fct4)。 +4. 珲哥修改taosdemo,变成多线程查询,修改后的软件我命名成taosdemoMul,然后做持续多线程查询db2上的数据,建立多个连接 +5. 4中查询过程放到后台,同时再次在db2执行建表、插入,查询操作。循环执行查询10次,每次间隔91s。 +6. 
然后查询taosd的log日志,看是否还存在上述问题“send auth msg to mnodes”。 \ No newline at end of file diff --git a/tests/pytest/cluster/TD-3693/insert1Data.json b/tests/pytest/cluster/TD-3693/insert1Data.json new file mode 100644 index 0000000000000000000000000000000000000000..3ac289a63a846c7de117ce6171ad023ca3f56211 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/insert1Data.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/cluster/TD-3693/insert2Data.json b/tests/pytest/cluster/TD-3693/insert2Data.json new file mode 100644 index 0000000000000000000000000000000000000000..25717df4c76f59e8ef7d638c8793a391ff338a7c --- /dev/null +++ b/tests/pytest/cluster/TD-3693/insert2Data.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + 
"precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/cluster/TD-3693/multClient.py b/tests/pytest/cluster/TD-3693/multClient.py new file mode 100644 index 0000000000000000000000000000000000000000..24c27d9de9ff383f412af33e8d5f8318d1032f63 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/multClient.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.rowNum = 100000 + self.ts = 1537146000000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert data to cluster'db + os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath) + # multiple new and cloes connection with query data + os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath) + os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath) + + + + # delete useless files + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf ./querySystemInfo*") + os.system("rm -rf cluster/TD-3693/multClient.py.sql") + os.system("rm -rf ./querySystemInfo*") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/cluster/TD-3693/multQuery.py b/tests/pytest/cluster/TD-3693/multQuery.py new file mode 100644 index 0000000000000000000000000000000000000000..70061a27f2ccb7cdef9cdea1b62cd0060f972c3c --- /dev/null +++ b/tests/pytest/cluster/TD-3693/multQuery.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading + + +class TwoClients: + def initConnection(self): + self.host = "fct4" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + # query data from cluster'db + conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config) + cur = conn.cursor() + tdSql.init(cur, True) + tdSql.execute("use db2") + cur.execute("select count (tbname) from stb0") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400000) + tdSql.execute("drop table if exists squerytest") + tdSql.execute("drop table if exists querytest") + tdSql.execute('''create stable squerytest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table querytest using squerytest tags('beijing')") + tdSql.execute("insert into querytest(ts) values(%d)" % (self.ts - 1)) + for i in range(self.rowNum): + tdSql.execute("insert into querytest values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + for j in range(10): + tdSql.execute("use db2") + tdSql.query("select count(*),last(*) from querytest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(1, 3, 1) + sleep(88) + tdSql.execute("drop table if exists squerytest") + tdSql.execute("drop table if exists querytest") + +clients = TwoClients() +clients.initConnection() +clients.run() \ No newline at end of file diff --git a/tests/pytest/cluster/TD-3693/queryCount.json b/tests/pytest/cluster/TD-3693/queryCount.json new file mode 100644 index 0000000000000000000000000000000000000000..089ae42aab379f806d13fd4dec66af680b546154 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/queryCount.json @@ -0,0 +1,15 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db2", + "query_times": 1000000, + "specified_table_query": + {"query_interval":1, "concurrent":100, + "sqls": [{"sql": "select count(*) from db.stb0", "result": ""}] + } +} \ No newline at end of file diff --git a/tests/pytest/crash_gen/README.md b/tests/pytest/crash_gen/README.md index 6788ab1a63d0a7c515558695605d1ec8ac5fb7f9..8cdddd5b57c31d563b38d3e8769ab8dc19642347 100644 --- a/tests/pytest/crash_gen/README.md +++ b/tests/pytest/crash_gen/README.md @@ 
-6,6 +6,25 @@ To effectively test and debug our TDengine product, we have developed a simple t exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems, hopefully without a pre-determined scenario. +# Features + +This tool can run as a test client with the following features: + +1. Any number of concurrent threads +1. Any number of test steps/loops +1. Auto-create and writing to multiple databases +1. Ignore specific error codes +1. Write small or large data blocks +1. Auto-generate out-of-sequence data, if needed +1. Verify the result of write operations +1. Concurrent writing to a shadow database for later data verification +1. User specified number of replicas to use, against clusters + +This tool can also use to start a TDengine service, either in stand-alone mode or +cluster mode. The features include: + +1. User specified number of D-Nodes to create/use. + # Preparation To run this tool, please ensure the followed preparation work is done first. @@ -16,7 +35,7 @@ To run this tool, please ensure the followed preparation work is done first. Ubuntu 20.04LTS as our own development environment, and suggest you also use such an environment if possible. -# Simple Execution +# Simple Execution as Client Test Tool To run the tool with the simplest method, follow the steps below: @@ -28,19 +47,21 @@ To run the tool with the simplest method, follow the steps below: That's it! -# Running Clusters +# Running Server-side Clusters This tool also makes it easy to test/verify the clustering capabilities of TDengine. You can start a cluster quite easily with the following command: ``` $ cd tests/pytest/ -$ ./crash_gen.sh -e -o 3 +$ rm -rf ../../build/cluster_dnode_?; ./crash_gen.sh -e -o 3 # first part optional ``` The `-e` option above tells the tool to start the service, and do not run any tests, while the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster. -Obviously you can adjust the the number here. +Obviously you can adjust the the number here. The `rm -rf` command line is optional +to clean up previous cluster data, so that we can start from a clean state with no data +at all. ## Behind the Scenes @@ -89,8 +110,9 @@ The exhaustive features of the tool is available through the `-h` option: ``` $ ./crash_gen.sh -h -usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r] - [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x] +usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] + [-i NUM_REPLICAS] [-k] [-l] [-m] [-n] + [-o NUM_DNODES] [-p] [-r] [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-w] [-x] TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) --------------------------------------------------------------------- @@ -109,11 +131,14 @@ optional arguments: -e, --run-tdengine Run TDengine service in foreground (default: false) -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS Ignore error codes, comma separated, 0x supported (default: None) - -i MAX_REPLICAS, --max-replicas MAX_REPLICAS - Maximum number of replicas to use, when testing against clusters. (default: 1) + -i NUM_REPLICAS, --num-replicas NUM_REPLICAS + Number (fixed) of replicas to use, when testing against clusters. 
(default: 1) + -k, --track-memory-leaks + Use Valgrind tool to track memory leaks (default: false) -l, --larger-data Write larger amount of data during write operations (default: false) + -m, --mix-oos-data Mix out-of-sequence data into the test data stream (default: true) -n, --dynamic-db-table-names - Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false) + Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false) -o NUM_DNODES, --num-dnodes NUM_DNODES Number of Dnodes to initialize, used with -e option. (default: 1) -p, --per-thread-db-connection @@ -124,6 +149,7 @@ optional arguments: -t NUM_THREADS, --num-threads NUM_THREADS Number of threads to run (default: 10) -v, --verify-data Verify data written in a number of places by reading back (default: false) + -w, --use-shadow-db Use a shaddow database to verify data integrity (default: false) -x, --continue-on-exception Continue execution after encountering unexpected/disallowed errors/exceptions (default: false) ``` diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 0fe94bad1d6b36aa1be5d47428dae19e918ce6e4..44295e8bee72bd4af398569887da69ab06234be5 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1574,9 +1574,9 @@ class TaskCreateDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # was: self.execWtSql(wt, "create database db") repStr = "" - if gConfig.max_replicas != 1: + if gConfig.num_replicas != 1: # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N - numReplica = gConfig.max_replicas # fixed, always + numReplica = gConfig.num_replicas # fixed, always repStr = "replica {}".format(numReplica) updatePostfix = "update 1" if gConfig.verify_data else "" # allow update only when "verify data" is active dbName = self._db.getName() @@ -2394,11 +2394,16 @@ class MainExec: help='Ignore error codes, comma separated, 0x supported (default: None)') parser.add_argument( '-i', - '--max-replicas', + '--num-replicas', action='store', default=1, type=int, - help='Maximum number of replicas to use, when testing against clusters. (default: 1)') + help='Number (fixed) of replicas to use, when testing against clusters. 
(default: 1)') + parser.add_argument( + '-k', + '--track-memory-leaks', + action='store_true', + help='Use Valgrind tool to track memory leaks (default: false)') parser.add_argument( '-l', '--larger-data', diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index ae6f8d5d3a5a4d6a8bf745a48ec2368aa5e832ad..cdbf2db4dab81ab07b51597eb0a2805cc931e34e 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -19,6 +19,7 @@ from queue import Queue, Empty from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress from .db import DbConn, DbTarget +import crash_gen.settings class TdeInstance(): """ @@ -132,6 +133,7 @@ keep 36500 walLevel 1 # # maxConnections 100 +quorum 2 """ cfgContent = cfgTemplate.format_map(cfgValues) f = open(cfgFile, "w") @@ -164,7 +166,12 @@ walLevel 1 return "127.0.0.1" def getServiceCmdLine(self): # to start the instance - return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + cmdLine = [] + if crash_gen.settings.gConfig.track_memory_leaks: + Logging.info("Invoking VALGRIND on service...") + cmdLine = ['valgrind', '--leak-check=yes'] + cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + return cmdLine def _getDnodes(self, dbc): dbc.query("show dnodes") @@ -202,7 +209,7 @@ walLevel 1 self.generateCfgFile() # service side generates config file, client does not self.rotateLogs() - self._smThread.start(self.getServiceCmdLine()) + self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions def stop(self): self._smThread.stop() @@ -225,7 +232,7 @@ class TdeSubProcess: # RET_SUCCESS = -4 def __init__(self): - self.subProcess = None + self.subProcess = None # type: subprocess.Popen # if tInst is None: # raise CrashGenError("Empty instance not allowed in TdeSubProcess") # self._tInst = tInst # Default create at ServiceManagerThread @@ -263,7 +270,7 @@ class TdeSubProcess: # print("Starting TDengine with env: ", myEnv.items()) # print("Starting TDengine via Shell: {}".format(cmdLineStr)) - useShell = True + useShell = True # Needed to pass environments into it self.subProcess = subprocess.Popen( # ' '.join(cmdLine) if useShell else cmdLine, # shell=useShell, @@ -276,12 +283,12 @@ class TdeSubProcess: env=myEnv ) # had text=True, which interferred with reading EOF - STOP_SIGNAL = signal.SIGKILL # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? + STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm def stop(self): """ - Stop a sub process, and try to return a meaningful return code. 
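For readers skimming the patch: the new `-k`/`--track-memory-leaks` flag added to crash_gen_main.py is consumed in service_manager.py by prefixing the taosd command line with valgrind. A reduced, illustrative sketch of that wiring (simplified; the real getServiceCmdLine() also routes the binary through a shell `exec`, and the function name below is hypothetical):

```
def build_service_cmdline(exec_file: str, cfg_dir: str, track_memory_leaks: bool) -> list:
    # When leak tracking is requested, run the service under valgrind;
    # otherwise invoke the binary directly with its config directory.
    cmd = ["valgrind", "--leak-check=yes"] if track_memory_leaks else []
    cmd += [exec_file, "-c", cfg_dir]
    return cmd

# build_service_cmdline("/usr/bin/taosd", "/etc/taos", True)
# -> ['valgrind', '--leak-check=yes', '/usr/bin/taosd', '-c', '/etc/taos']
```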
+ Stop a sub process, DO NOT return anything, process all conditions INSIDE Common POSIX signal values (from man -7 signal): SIGHUP 1 @@ -301,40 +308,99 @@ class TdeSubProcess: """ if not self.subProcess: Logging.error("Sub process already stopped") - return # -1 + return retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) if retCode: # valid return code, process ended - retCode = -retCode # only if valid + # retCode = -retCode # only if valid Logging.warning("TSP.stop(): process ended itself") self.subProcess = None - return retCode + return # process still alive, let's interrupt it - Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL)) + self._stopForSure(self.subProcess, self.STOP_SIGNAL) # success if no exception + self.subProcess = None + # sub process should end, then IPC queue should end, causing IO thread to end - topSubProc = psutil.Process(self.subProcess.pid) - for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False - child.send_signal(self.STOP_SIGNAL) - time.sleep(0.2) # 200 ms - # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell) - - self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell) - self.subProcess.wait(20) - retCode = self.subProcess.returncode # should always be there - # May throw subprocess.TimeoutExpired exception above, therefore - # The process is guranteed to have ended by now - self.subProcess = None - if retCode == self.SIG_KILL_RETCODE: - Logging.info("TSP.stop(): sub proc KILLED, as expected") - elif retCode == (- self.STOP_SIGNAL): - Logging.info("TSP.stop(), sub process STOPPED, as expected") - elif retCode != 0: # != (- signal.SIGINT): - Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format( - self.STOP_SIGNAL, retCode)) - else: - Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL)) - return - retCode + + @classmethod + def _stopForSure(cls, proc: subprocess.Popen, sig: int): + ''' + Stop a process and all sub processes with a singal, and SIGKILL if necessary + ''' + def doKillTdService(proc: subprocess.Popen, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(proc.pid, sig)) + proc.send_signal(sig) + try: + retCode = proc.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(proc.pid)) + elif (- retCode) == sig : + Logging.info("TD service terminated with expected return code {}".format(sig)) + else: + Logging.warning("TD service terminated, EXPECTING ret code {}, got {}".format(sig, -retCode)) + return True # terminated successfully + except subprocess.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(proc.pid, sig)) + return False # failed to terminate + + + def doKillChild(child: psutil.Process, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(child.pid, sig)) + child.send_signal(sig) + try: + retCode = child.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(child.pid)) + elif (- retCode) == sig : + Logging.info("Sub-sub process terminated with expected return code {}".format(sig)) + else: + Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig, -retCode)) + return True # terminated successfully + except 
psutil.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(child.pid, sig)) + return False # did not terminate + + def doKill(proc: subprocess.Popen, sig: int): + pid = proc.pid + try: + topSubProc = psutil.Process(pid) + for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False + Logging.warning("Unexpected child to be killed") + doKillChild(child, sig) + except psutil.NoSuchProcess as err: + Logging.info("Process not found, can't kill, pid = {}".format(pid)) + + return doKillTdService(proc, sig) + # TODO: re-examine if we need to kill the top process, which is always the SHELL for now + # try: + # proc.wait(1) # SHELL process here, may throw subprocess.TimeoutExpired exception + # # expRetCode = self.SIG_KILL_RETCODE if sig==signal.SIGKILL else (-sig) + # # if retCode == expRetCode: + # # Logging.info("Process terminated with expected return code {}".format(retCode)) + # # else: + # # Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(expRetCode, retCode)) + # # return True # success + # except subprocess.TimeoutExpired as err: + # Logging.warning("Failed to kill process {} with signal {}".format(pid, sig)) + # return False # failed to kill + + def softKill(proc, sig): + return doKill(proc, sig) + + def hardKill(proc): + return doKill(proc, signal.SIGKILL) + + + + pid = proc.pid + Logging.info("Terminate running processes under {}, with SIG #{} and wait...".format(pid, sig)) + if softKill(proc, sig): + return# success + if sig != signal.SIGKILL: # really was soft above + if hardKill(proc): + return + raise CrashGenError("Failed to stop process, pid={}".format(pid)) class ServiceManager: PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process @@ -560,7 +626,8 @@ class ServiceManagerThread: # self._tInstNum = tInstNum # instance serial number in cluster, ZERO based # self._tInst = tInst or TdeInstance() # Need an instance - self._thread = None # The actual thread, # type: threading.Thread + self._thread = None # The actual thread, # type: threading.Thread + self._thread2 = None # watching stderr self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually. def __repr__(self): @@ -568,11 +635,20 @@ class ServiceManagerThread: self.getStatus(), self._tdeSubProcess) def getStatus(self): + ''' + Get the status of the process being managed. (misnomer alert!) + ''' return self._status # Start the thread (with sub process), and wait for the sub service # to become fully operational - def start(self, cmdLine): + def start(self, cmdLine : str, logDir: str): + ''' + Request the manager thread to start a new sub process, and manage it. 
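The `_stopForSure()` helpers above implement a soft-then-hard shutdown: send the configured signal (now SIGINT by default), wait up to 20 seconds, and only fall back to SIGKILL if the process has not exited. A minimal, generic sketch of that escalation pattern with illustrative names (not the patch code itself):

```
import signal
import subprocess

def stop_for_sure(proc: subprocess.Popen, sig: int = signal.SIGINT, timeout: float = 20) -> int:
    """Return the exit code (negative N means 'killed by signal N'), escalating to SIGKILL."""
    for attempt in (sig, signal.SIGKILL):
        proc.send_signal(attempt)
        try:
            return proc.wait(timeout)
        except subprocess.TimeoutExpired:
            continue  # process still alive: escalate to SIGKILL on the next pass
    raise RuntimeError(f"Failed to stop process, pid={proc.pid}")
```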
+ + :param cmdLine: the command line to invoke + :param logDir: the logging directory, to hold stdout/stderr files + ''' if self._thread: raise RuntimeError("Unexpected _thread") if self._tdeSubProcess: @@ -582,20 +658,30 @@ class ServiceManagerThread: self._status.set(Status.STATUS_STARTING) self._tdeSubProcess = TdeSubProcess() - self._tdeSubProcess.start(cmdLine) + self._tdeSubProcess.start(cmdLine) # TODO: verify process is running self._ipcQueue = Queue() self._thread = threading.Thread( # First thread captures server OUTPUT target=self.svcOutputReader, - args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue, logDir)) self._thread.daemon = True # thread dies with the program self._thread.start() + time.sleep(0.01) + if not self._thread.is_alive(): # What happened? + Logging.info("Failed to started process to monitor STDOUT") + self.stop() + raise CrashGenError("Failed to start thread to monitor STDOUT") + Logging.info("Successfully started process to monitor STDOUT") self._thread2 = threading.Thread( # 2nd thread captures server ERRORs target=self.svcErrorReader, - args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) + args=(self._tdeSubProcess.getStdErr(), self._ipcQueue, logDir)) self._thread2.daemon = True # thread dies with the program self._thread2.start() + time.sleep(0.01) + if not self._thread2.is_alive(): + self.stop() + raise CrashGenError("Failed to start thread to monitor STDERR") # wait for service to start for i in range(0, 100): @@ -643,7 +729,7 @@ class ServiceManagerThread: Logging.info("Service already stopped") return if self.getStatus().isStopping(): - Logging.info("Service is already being stopped") + Logging.info("Service is already being stopped, pid: {}".format(self._tdeSubProcess.getPid())) return # Linux will send Control-C generated SIGINT to the TDengine process # already, ref: @@ -653,14 +739,14 @@ class ServiceManagerThread: self._status.set(Status.STATUS_STOPPING) # retCode = self._tdeSubProcess.stop() - try: - retCode = self._tdeSubProcess.stop() - # print("Attempted to stop sub process, got return code: {}".format(retCode)) - if retCode == signal.SIGSEGV : # SGV - Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") - except subprocess.TimeoutExpired as err: - Logging.info("Time out waiting for TDengine service process to exit") - else: + # try: + # retCode = self._tdeSubProcess.stop() + # # print("Attempted to stop sub process, got return code: {}".format(retCode)) + # if retCode == signal.SIGSEGV : # SGV + # Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") + # except subprocess.TimeoutExpired as err: + # Logging.info("Time out waiting for TDengine service process to exit") + if not self._tdeSubProcess.stop(): # everything withing if self._tdeSubProcess.isRunning(): # still running, should now never happen Logging.error("FAILED to stop sub process, it is still running... 
pid = {}".format( self._tdeSubProcess.getPid())) @@ -683,16 +769,18 @@ class ServiceManagerThread: raise RuntimeError( "SMT.Join(): Unexpected status: {}".format(self._status)) - if self._thread: - self._thread.join() - self._thread = None - self._status.set(Status.STATUS_STOPPED) - # STD ERR thread - self._thread2.join() - self._thread2 = None + if self._thread or self._thread2 : + if self._thread: + self._thread.join() + self._thread = None + if self._thread2: # STD ERR thread + self._thread2.join() + self._thread2 = None else: print("Joining empty thread, doing nothing") + self._status.set(Status.STATUS_STOPPED) + def _trimQueue(self, targetSize): if targetSize <= 0: return # do nothing @@ -739,11 +827,22 @@ class ServiceManagerThread: print(pBar, end="", flush=True) print('\b\b\b\b', end="", flush=True) - def svcOutputReader(self, out: IO, queue): + def svcOutputReader(self, out: IO, queue, logDir: str): + ''' + The infinite routine that processes the STDOUT stream for the sub process being managed. + + :param out: the IO stream object used to fetch the data from + :param queue: the queue where we dump the roughly parsed line-by-line data + :param logDir: where we should dump a verbatim output file + ''' + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stdout.log') + fOut = open(logFile, 'wb') # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python # print("This is the svcOutput Reader...") # for line in out : for line in iter(out.readline, b''): + fOut.write(line) # print("Finished reading a line: {}".format(line)) # print("Adding item to queue...") try: @@ -772,10 +871,16 @@ class ServiceManagerThread: # queue.put(line) # meaning sub process must have died Logging.info("EOF for TDengine STDOUT: {}".format(self)) - out.close() + out.close() # Close the stream + fOut.close() # Close the output file - def svcErrorReader(self, err: IO, queue): + def svcErrorReader(self, err: IO, queue, logDir: str): + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stderr.log') + fErr = open(logFile, 'wb') for line in iter(err.readline, b''): + fErr.write(line) Logging.info("TDengine STDERR: {}".format(line)) Logging.info("EOF for TDengine STDERR: {}".format(self)) - err.close() \ No newline at end of file + err.close() + fErr.close() \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e7e05866369666eb40ccbc6186a7a60ea3595dbf..3528b01dbda4da7476fabb5763405091e2ea5cdd 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -151,6 +151,9 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + # update python3 ./test.py -f update/allow_update.py @@ -219,6 +222,11 @@ python3 ./test.py -f query/bug3351.py python3 ./test.py -f query/bug3375.py python3 ./test.py -f query/queryJoin10tables.py python3 ./test.py -f query/queryStddevWithGroupby.py +python3 ./test.py -f query/querySecondtscolumnTowherenow.py +python3 ./test.py -f query/queryFilterTswithDateUnit.py +python3 ./test.py -f query/queryTscomputWithNow.py + + #stream python3 ./test.py -f stream/metric_1.py @@ -231,9 +239,13 @@ python3 ./test.py -f stream/history.py python3 ./test.py -f stream/sys.py python3 
./test.py -f stream/table_1.py python3 ./test.py -f stream/table_n.py +python3 ./test.py -f stream/showStreamExecTimeisNull.py +python3 ./test.py -f stream/cqSupportBefore1970.py #alter table python3 ./test.py -f alter/alter_table_crash.py +python3 ./test.py -f alter/alterTabAddTagWithNULL.py +python3 ./test.py -f alter/alterTimestampColDataProcess.py # client python3 ./test.py -f client/client.py @@ -274,6 +286,7 @@ python3 ./test.py -f functions/all_null_value.py python3 ./test.py -f functions/function_avg.py -r 1 python3 ./test.py -f functions/function_bottom.py -r 1 python3 ./test.py -f functions/function_count.py -r 1 +python3 ./test.py -f functions/function_count_last_stab.py python3 ./test.py -f functions/function_diff.py -r 1 python3 ./test.py -f functions/function_first.py -r 1 python3 ./test.py -f functions/function_last.py -r 1 @@ -303,6 +316,11 @@ python3 ./test.py -f insert/unsignedSmallint.py python3 ./test.py -f insert/unsignedTinyint.py python3 ./test.py -f query/filterAllUnsignedIntTypes.py +python3 ./test.py -f tag_lite/unsignedInt.py +python3 ./test.py -f tag_lite/unsignedBigint.py +python3 ./test.py -f tag_lite/unsignedSmallint.py +python3 ./test.py -f tag_lite/unsignedTinyint.py + python3 ./test.py -f functions/function_percentile2.py python3 ./test.py -f insert/boundary2.py python3 ./test.py -f alter/alter_debugFlag.py diff --git a/tests/pytest/functions/function_count_last_stab.py b/tests/pytest/functions/function_count_last_stab.py new file mode 100644 index 0000000000000000000000000000000000000000..1d777c6bd314941036f542c7d0e9063e590fa7dd --- /dev/null +++ b/tests/pytest/functions/function_count_last_stab.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create stable stest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using stest tags('beijing')") + tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) + + + # last verifacation + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select count(*),last(*) from stest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(1, 3, 1) + + tdSql.query("select count(*),last(*) from stest group by col2") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 2, 10) + tdSql.checkData(0, 3, 1) + + tdSql.query("select count(*),last(ts,stest.*) from stest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 2, "2018-09-17 09:00:00") + tdSql.checkData(1, 4, 1) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/functions/function_operations.py b/tests/pytest/functions/function_operations.py index 162aa3eb658a60615c9f20de0c363f9066096f0c..859cd78a3d396cf6a4fd9d08c24f6f7bd6ac324c 100644 --- a/tests/pytest/functions/function_operations.py +++ b/tests/pytest/functions/function_operations.py @@ -82,14 +82,14 @@ class TDTestCase: self.ts = self.ts + self.rowNum + 10 - tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 1 )) + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 254, 65534, 4294967294, 18446744073709551614)" % ( self.ts + self.rowNum + 1 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 2 )) tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % ( self.ts + self.rowNum + 3 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 4 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 5 )) self.rowNum = self.rowNum + 5 - col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' ] + col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' 
, 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' , '18446744073709551614' ] op_list = [ '+' , '-' , '*' , '/' , '%' ] err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ] order_lsit = [ ' order by ts ', ' order by ts desc ', ' order by ts asc '] diff --git a/tests/pytest/perfbenchmark/joinPerformance.py b/tests/pytest/perfbenchmark/joinPerformance.py new file mode 100644 index 0000000000000000000000000000000000000000..acd4f88c9b1a2df91f7e129ac87d57c9b04c7106 --- /dev/null +++ b/tests/pytest/perfbenchmark/joinPerformance.py @@ -0,0 +1,355 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import os +import json +import argparse +import subprocess +import datetime +import re + +from multiprocessing import cpu_count +# from util.log import * +# from util.sql import * +# from util.cases import * +# from util.dnodes import * + +class JoinPerf: + + def __init__(self, clearCache, dbName, keep): + self.clearCache = clearCache + self.dbname = dbName + self.drop = "yes" + self.keep = keep + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taosperf" + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + + # def init(self, conn, logSql): + # tdLog.debug(f"start to excute {__file__}") + # tdSql.init(conn.cursor()) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + return self.getBuildPath() + "/sim/dnode1/cfg" + + # def initConnection(self): + # return self.getCfgDir() + + # def connectDB(self): + # self.conn = taos.connect( + # self.host, + # self.user, + # self.password, + # self.getCfgDir()) + # return self.conn.cursor() + + def dbinfocfg(self) -> dict: + return { + "name": self.dbname, + "drop": self.drop, + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": self.keep, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # def column_tag_count(self, **column_tag) -> list : + # return [{"type": type, "count": count} for type, count in column_tag.items()] + + def type_check(func): + def wrapper(self, **kwargs): + num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"] + str_types = ["binary", "nchar"] + for k ,v in kwargs.items(): + if k.lower() not in num_types and k.lower() not in str_types: + return f"args {k} type error, not allowed" + elif not isinstance(v, (int, list, tuple)): + return f"value {v} type error, not allowed" + elif k.lower() in num_types and not isinstance(v, int): + return f"arg {v} takes 1 positional 
argument must be type int " + elif isinstance(v, (list,tuple)) and len(v) > 2: + return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given " + elif isinstance(v,(list,tuple)) and [ False for _ in v if not isinstance(_, int) ]: + return f"arg {v} takes from 1 to 2 positional arguments must be type int " + else: + pass + return func(self, **kwargs) + return wrapper + + @type_check + def column_tag_count(self, **column_tag) -> list : + init_column_tag = [] + for k, v in column_tag.items(): + if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE): + init_column_tag.append({"type": k, "count": v}) + elif re.search(k, "binary, nchar", re.IGNORECASE): + if isinstance(v, int): + init_column_tag.append({"type": k, "count": v, "len":8}) + elif len(v) == 1: + init_column_tag.append({"type": k, "count": v[0], "len": 8}) + else: + init_column_tag.append({"type": k, "count": v[0], "len": v[1]}) + return init_column_tag + + def stbcfg(self, stb: str, child_tab_count: int, prechildtab: str, columns: dict, tags: dict) -> dict: + return { + "name": stb, + "child_table_exists": "no", + "childtable_count": child_tab_count, + "childtable_prefix": prechildtab, + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 50, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "1969-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": self.column_tag_count(**columns), + "tags": self.column_tag_count(**tags) + } + + def createcfg(self,db: dict, stbs: list) -> dict: + return { + "filetype": "insert", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "thread_count": cpu_count(), + "thread_count_create_tbl": cpu_count(), + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": db, + "super_tables": stbs + }] + } + + def createinsertfile(self,db: dict, stbs: list) -> str: + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(self.createcfg(db, stbs), f) + + return file_create_table + + def querysqls(self, sql: str) -> list: + return [{"sql":sql,"result":""}] + + def querycfg(self, sql: str) -> dict: + return { + "filetype": "query", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "confirm_parameter_prompt": "yes", + "databases": "db", + "specified_table_query": { + "query_interval": 0, + "concurrent": cpu_count(), + "sqls": self.querysqls(sql) + } + } + + def createqueryfile(self, sql: str): + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_query_table = f"/tmp/query_{date}.json" + + with open(file_query_table,"w") as f: + json.dump(self.querycfg(sql), f) + + return file_query_table + + def taosdemotable(self, filepath, resultfile="/dev/null"): + taosdemopath = self.getBuildPath() + "/debug/build/bin" + with open(filepath,"r") as f: + filetype = json.load(f)["filetype"] + if filetype == "insert": + taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1" + else: + taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f 
{filepath} > {resultfile} 2>&1" + _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8") + + def droptmpfile(self): + drop_file_cmd = "rm -f /tmp/query_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f querySystemInfo-*" + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f /tmp/insert_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + + def run(self): + print("========== join on table schema performance ==========") + if self.clearCache == True: + # must be root permission + subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches") + + # cfg = { + # 'enableRecordSql': '1' + # } + # tdDnodes.deploy(1, cfg) + + # tdLog.printNoPrefix("==========step1: create 1024 columns on different data type==========") + # db = self.dbinfocfg() + # stblist = [] + # # the supertable config for each type in column_tag_types + # column_tag_types = ["INT","FLOAT","BIGINT","TINYINT","SMALLINT","DOUBLE","BINARY","NCHAR"] + # for i in range(len(column_tag_types)): + # tagtype = { + # "INT": 0, + # "FLOAT": 0, + # "BIGINT": 0, + # "TINYINT": 0, + # "SMALLINT": 0, + # "DOUBLE": 0, + # "BINARY": 0, + # "NCHAR": 0 + # } + # columntype = tagtype.copy() + # tagtype["INT"] = 2 + # if column_tag_types[i] == "BINARY": + # columntype[column_tag_types[i]] = [509, 10] + # elif column_tag_types[i] == "NCHAR": + # columntype[column_tag_types[i]] = [350, 10] + # else: + # columntype[column_tag_types[i]] = 1021 + # supertable = self.stbcfg( + # stb=f"stb{i}", + # child_tab_count=2, + # prechildtab=f"t{i}", + # columns=columntype, + # tags=tagtype + # ) + # stblist.append(supertable) + # self.taosdemotable(self.createinsertfile(db=db, stbs=stblist)) + + # tdLog.printNoPrefix("==========step2: execute query operation==========") + # tdLog.printNoPrefix("==========execute query operation==========") + sqls = { + "nchar":"select * from t00,t01 where t00.ts=t01.ts" + } + for type, sql in sqls.items(): + result_file = f"/tmp/queryResult_{type}.log" + self.taosdemotable(self.createqueryfile(sql), resultfile=result_file) + if result_file: + # tdLog.printNoPrefix(f"execute type {type} sql, the sql is: {sql}") + print(f"execute type {type} sql, the sql is: {sql}") + max_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{max=$7;next}}{{max=max>$7?max:$7}}END{{print "Max=",max,"s"}}' + ''' + max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {max_sql_time}") + print(f"type: {type} sql time : {max_sql_time}") + + min_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{min=$7;next}}{{min=min<$7?min:$7}}END{{print "Min=",min,"s"}}' + ''' + min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {min_sql_time}") + print(f"type: {type} sql time : {min_sql_time}") + + avg_sql_time_cmd = f''' + grep Spent {result_file} |awk '{{sum+=$7}}END{{print "Average=",sum/NR,"s"}}' + ''' + avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {avg_sql_time}") + print(f"type: {type} sql time : {avg_sql_time}") + + # tdLog.printNoPrefix(f"==========type {type} sql is over==========") + + # tdLog.printNoPrefix("==========query operation is over==========") + + self.droptmpfile() + # tdLog.printNoPrefix("==========tmp file has been deleted==========") + + # 
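The three grep/awk pipelines above scan the same result file once each to report the maximum, minimum, and average "Spent" timings. A hypothetical alternative helper that parses the file a single time in Python; it assumes the per-query time is the first number following the word "Spent" (the exact log layout is an assumption here, while the shell version keys on whitespace field $7):

```
import re
import statistics

def spent_seconds(logfile: str) -> list:
    """Collect per-query 'Spent <seconds>' timings from a taosdemo result file."""
    pattern = re.compile(r"Spent\s+([0-9]+(?:\.[0-9]+)?)")
    with open(logfile, encoding="utf-8", errors="ignore") as f:
        return [float(m.group(1)) for m in map(pattern.search, f) if m]

times = spent_seconds("/tmp/queryResult_nchar.log")
if times:
    print(f"Max= {max(times)} s, Min= {min(times)} s, Average= {statistics.mean(times)} s")
```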
def stop(self): + # tdSql.close() + # tdLog.success(f"{__file__} successfully executed") + +# tdCases.addLinux(__file__, TDTestCase()) +# tdCases.addWindows(__file__, TDTestCase()) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-r', + '--remove-cache', + action='store_true', + default=False, + help='clear cache before query (default: False)') + parser.add_argument( + '-d', + '--database-name', + action='store', + default='db', + type=str, + help='Database name to be created (default: db)') + parser.add_argument( + '-k', + '--keep-time', + action='store', + default=36500, + type=int, + help='Database keep parameters (default: 36500)') + + args = parser.parse_args() + jointest = JoinPerf(args.remove_cache, args.database_name, args.keep_time) + jointest.run() \ No newline at end of file diff --git a/tests/pytest/query/queryFilterTswithDateUnit.py b/tests/pytest/query/queryFilterTswithDateUnit.py new file mode 100644 index 0000000000000000000000000000000000000000..eb9eb02afd27aaef75c9afe2af4765fd748e636b --- /dev/null +++ b/tests/pytest/query/queryFilterTswithDateUnit.py @@ -0,0 +1,169 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def insertnow(self, tsp1, tsp2, tsp3): + + tdSql.execute( + "create table stbts (ts timestamp, ts1 timestamp, c1 int, ts2 timestamp) TAGS(t1 int)" + ) + tdSql.execute("create table tts1 using stbts tags(1)") + + tdSql.execute("insert into tts1 values (now+1d, now+1d, 6, now+1d)") + tdSql.execute("insert into tts1 values (now, now, 5, now)") + tdSql.execute("insert into tts1 values (now-1d, now-1d, 4, now-1d)") + tdSql.execute(f"insert into tts1 values ({tsp1}, {tsp1}, 3, {tsp1})") + tdSql.execute(f"insert into tts1 values ({tsp2}, {tsp2}, 2, {tsp2})") + tdSql.execute(f"insert into tts1 values ({tsp3}, {tsp3}, 1, {tsp3})") + + + def querynow(self): + + tdLog.printNoPrefix("==========step query: execute query operation") + time.sleep(1) + + cols = ["ts", "ts1", "ts2"] + + for col in cols: + tdSql.error(f" select * from tts1 where {col} = 1d ") + tdSql.error(f" select * from tts1 where {col} < -1d ") + tdSql.error(f" select * from tts1 where {col} > 1d ") + tdSql.error(f" select * from tts1 where {col} >= -1d ") + tdSql.error(f" select * from tts1 where {col} <= 1d ") + tdSql.error(f" select * from tts1 where {col} <> 1d ") + + tdSql.error(f" select * from tts1 where {col} = -1m ") + tdSql.error(f" select * from tts1 where {col} < 1m ") + tdSql.error(f" select * from tts1 where {col} > 1m ") + tdSql.error(f" select * from tts1 where {col} >= -1m ") + tdSql.error(f" select * from tts1 where {col} <= 1m ") + tdSql.error(f" select * from tts1 where {col} <> 1m ") + + tdSql.error(f" select * from tts1 where {col} = -1s ") + tdSql.error(f" select * from tts1 where {col} < 1s ") + tdSql.error(f" select * from tts1 where {col} > 
1s ") + tdSql.error(f" select * from tts1 where {col} >= -1s ") + tdSql.error(f" select * from tts1 where {col} <= 1s ") + tdSql.error(f" select * from tts1 where {col} <> 1s ") + + tdSql.error(f" select * from tts1 where {col} = -1a ") + tdSql.error(f" select * from tts1 where {col} < 1a ") + tdSql.error(f" select * from tts1 where {col} > 1a ") + tdSql.error(f" select * from tts1 where {col} >= -1a ") + tdSql.error(f" select * from tts1 where {col} <= 1a ") + tdSql.error(f" select * from tts1 where {col} <> 1a ") + + tdSql.error(f" select * from tts1 where {col} = -1h ") + tdSql.error(f" select * from tts1 where {col} < 1h ") + tdSql.error(f" select * from tts1 where {col} > 1h ") + tdSql.error(f" select * from tts1 where {col} >= -1h ") + tdSql.error(f" select * from tts1 where {col} <= 1h ") + tdSql.error(f" select * from tts1 where {col} <> 1h ") + + tdSql.error(f" select * from tts1 where {col} = -1w ") + tdSql.error(f" select * from tts1 where {col} < 1w ") + tdSql.error(f" select * from tts1 where {col} > 1w ") + tdSql.error(f" select * from tts1 where {col} >= -1w ") + tdSql.error(f" select * from tts1 where {col} <= 1w ") + tdSql.error(f" select * from tts1 where {col} <> 1w ") + + tdSql.error(f" select * from tts1 where {col} = -1u ") + tdSql.error(f" select * from tts1 where {col} < 1u ") + tdSql.error(f" select * from tts1 where {col} > 1u ") + tdSql.error(f" select * from tts1 where {col} >= -1u ") + tdSql.error(f" select * from tts1 where {col} <= 1u ") + tdSql.error(f" select * from tts1 where {col} <> u ") + + tdSql.error(f" select * from tts1 where {col} = 0d ") + tdSql.error(f" select * from tts1 where {col} < 0s ") + tdSql.error(f" select * from tts1 where {col} > 0a ") + tdSql.error(f" select * from tts1 where {col} >= 0m ") + tdSql.error(f" select * from tts1 where {col} <= 0h ") + tdSql.error(f" select * from tts1 where {col} <> 0u ") + tdSql.error(f" select * from tts1 where {col} <> 0w ") + + tdSql.error(f" select * from tts1 where {col} = 1m+1h ") + tdSql.error(f" select * from tts1 where {col} < 1w-1d ") + tdSql.error(f" select * from tts1 where {col} > 0a/1u ") + tdSql.error(f" select * from tts1 where {col} >= 1d/0s ") + tdSql.error(f" select * from tts1 where {col} <= 1s*1a ") + tdSql.error(f" select * from tts1 where {col} <> 0w/0d ") + + tdSql.error(f" select * from tts1 where {col} = 1m+1h ") + tdSql.error(f" select * from tts1 where {col} < 1w-1d ") + tdSql.error(f" select * from tts1 where {col} > 0a/1u ") + tdSql.error(f" select * from tts1 where {col} >= 1d/0s ") + tdSql.error(f" select * from tts1 where {col} <= 1s*1a ") + tdSql.error(f" select * from tts1 where {col} <> 0w/0d ") + + tdSql.error(f" select * from tts1 where {col} = 1u+1 ") + tdSql.error(f" select * from tts1 where {col} < 1a-1 ") + tdSql.error(f" select * from tts1 where {col} > 1s*1 ") + tdSql.error(f" select * from tts1 where {col} >= 1m/1 ") + tdSql.error(f" select * from tts1 where {col} <= 1h/0 ") + tdSql.error(f" select * from tts1 where {col} <> 0/1d ") + tdSql.error(f" select * from tts1 where {col} <> 1w+'2010-01-01 00:00:00' ") + + tdSql.error(f" select * from tts1 where {col} = 1-1h ") + tdSql.error(f" select * from tts1 where {col} < 1w-d ") + tdSql.error(f" select * from tts1 where {col} > 0/u ") + tdSql.error(f" select * from tts1 where {col} >= d/s ") + tdSql.error(f" select * from tts1 where {col} <= 1/a ") + tdSql.error(f" select * from tts1 where {col} <> d/1 ") + + def run(self): + tdSql.execute("drop database if exists dbms") + tdSql.execute("drop database if exists 
dbus") + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + + tdLog.printNoPrefix("==========step1:create table precision ms && insert data && query") + # create databases precision is ms + tdSql.execute("create database if not exists dbms keep 36500") + tdSql.execute("use dbms") + tsp1 = 0 + tsp2 = -28800000 + tsp3 = -946800000000 + self.insertnow(tsp1,tsp2,tsp3) + self.querynow() + + tdLog.printNoPrefix("==========step2:create table precision us && insert data && query") + # create databases precision is us + tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ") + tdSql.execute("use dbus") + tsp2 = tsp2 * 1000 + tsp3 = tsp3 * 1000 + self.insertnow(tsp1,tsp2,tsp3) + self.querynow() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py index 4e3f9dd3bdd6376ac1069c4e084944a577ce4a70..52e49a57c6883f6fe57df887756bbf2d27199806 100644 --- a/tests/pytest/query/queryNormal.py +++ b/tests/pytest/query/queryNormal.py @@ -16,7 +16,7 @@ import taos from util.log import * from util.cases import * from util.sql import * - +from util.dnodes import * class TDTestCase: def init(self, conn, logSql): @@ -122,6 +122,32 @@ class TDTestCase: tdSql.query("SELECT server_status()") tdSql.checkRows(1) + # https://jira.taosdata.com:18080/browse/TD-3800 + tdSql.execute("create table m1(ts timestamp, k int) tags(a int)") + tdSql.execute("create table tm0 using m1 tags(1)") + tdSql.execute("create table tm1 using m1 tags(2)") + tdSql.execute("insert into tm0 values('2020-3-1 1:1:1', 112)") + tdSql.execute("insert into tm1 values('2020-1-1 1:1:1', 1)('2020-3-1 0:1:1', 421)") + + tdSql.query("select last(*) from m1 group by tbname") + tdSql.checkData(0, 0, "2020-03-01 01:01:01") + tdSql.checkData(0, 1, 112) + tdSql.checkData(0, 2, "tm0") + tdSql.checkData(1, 0, "2020-03-01 00:01:01") + tdSql.checkData(1, 1, 421) + tdSql.checkData(1, 2, "tm1") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select last(*) from m1 group by tbname") + tdSql.checkData(0, 0, "2020-03-01 01:01:01") + tdSql.checkData(0, 1, 112) + tdSql.checkData(0, 2, "tm0") + tdSql.checkData(1, 0, "2020-03-01 00:01:01") + tdSql.checkData(1, 1, 421) + tdSql.checkData(1, 2, "tm1") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/query/querySecondtscolumnTowherenow.py b/tests/pytest/query/querySecondtscolumnTowherenow.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc18d99a60146b416dab1e8d9f7f2417be4d00c --- /dev/null +++ b/tests/pytest/query/querySecondtscolumnTowherenow.py @@ -0,0 +1,131 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + # timestamp list: + # 0->"1970-01-01 08:00:00" | -28800000->"1970-01-01 00:00:00" | -946800000000->"1940-01-01 00:00:00" + ts1 = 0 + ts2 = -28800000 + ts3 = -946800000000 + tdSql.execute( + "create table stb2ts (ts timestamp, ts1 timestamp, ts2 timestamp, c1 int, ts3 timestamp) TAGS(t1 int)" + ) + tdSql.execute("create table t2ts1 using stb2ts tags(1)") + + tdSql.execute("insert into t2ts1 values (now, now, now, 1, now)") + tdSql.execute("insert into t2ts1 values (now-1m, now-1m, now-1m, 1, now-1m)") + tdSql.execute(f"insert into t2ts1 values ({ts1}, {ts1}, {ts1}, 1, {ts1})") + # tdSql.execute(f"insert into t2ts1 values ({ts2}, {ts2}, {ts2}, 1, {ts2})") + # tdSql.execute(f"insert into t2ts1 values ({ts3}, {ts3}, {ts3}, 1, {ts3})") + + tdLog.printNoPrefix("==========step2:query") + time.sleep(1) + # query primary key timestamp column + tdSql.execute("select * from t2ts1 where ts < now") + ts_len1 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts <= now") + ts_len2 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts > now") + ts_len3 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts >= now") + ts_len4 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts = now") + ts_len5 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts <> now") + ts_len6 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts between 0 and now") + ts_len7 = len(tdSql.cursor.fetchall()) + tdSql.execute("select * from t2ts1 where ts between now and now+100d") + ts_len8 = len(tdSql.cursor.fetchall()) + + # query noemal timestamp column + tdSql.query("select * from t2ts1 where ts1 < now") + tdSql.checkRows(ts_len1) + tdSql.query("select * from t2ts1 where ts2 < now") + tdSql.checkRows(ts_len1) + tdSql.query("select * from t2ts1 where ts3 < now") + tdSql.checkRows(ts_len1) + + tdSql.query("select * from t2ts1 where ts1 <= now") + tdSql.checkRows(ts_len2) + tdSql.query("select * from t2ts1 where ts2 <= now") + tdSql.checkRows(ts_len2) + tdSql.query("select * from t2ts1 where ts3 <= now") + tdSql.checkRows(ts_len2) + + tdSql.query("select * from t2ts1 where ts1 > now") + tdSql.checkRows(ts_len3) + tdSql.query("select * from t2ts1 where ts2 > now") + tdSql.checkRows(ts_len3) + tdSql.query("select * from t2ts1 where ts3 > now") + tdSql.checkRows(ts_len3) + + tdSql.query("select * from t2ts1 where ts1 >= now") + tdSql.checkRows(ts_len4) + tdSql.query("select * from t2ts1 where ts2 >= now") + tdSql.checkRows(ts_len4) + tdSql.query("select * from t2ts1 where ts3 >= now") + tdSql.checkRows(ts_len4) + + tdSql.query("select * from t2ts1 where ts1 = now") + tdSql.checkRows(ts_len5) + tdSql.query("select * from t2ts1 where ts2 = now") + 
tdSql.checkRows(ts_len5) + tdSql.query("select * from t2ts1 where ts3 = now") + tdSql.checkRows(ts_len5) + + tdSql.query("select * from t2ts1 where ts1 <> now") + tdSql.checkRows(ts_len6) + tdSql.query("select * from t2ts1 where ts2 <> now") + tdSql.checkRows(ts_len6) + tdSql.query("select * from t2ts1 where ts3 <> now") + tdSql.checkRows(ts_len6) + + tdSql.query("select * from t2ts1 where ts1 between 0 and now") + tdSql.checkRows(ts_len7) + tdSql.query("select * from t2ts1 where ts2 between 0 and now") + tdSql.checkRows(ts_len7) + tdSql.query("select * from t2ts1 where ts3 between 0 and now") + tdSql.checkRows(ts_len7) + + tdSql.query("select * from t2ts1 where ts1 between now and now+100d") + tdSql.checkRows(ts_len8) + tdSql.query("select * from t2ts1 where ts2 between now and now+100d") + tdSql.checkRows(ts_len8) + tdSql.query("select * from t2ts1 where ts3 between now and now+100d") + tdSql.checkRows(ts_len8) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/queryTscomputWithNow.py b/tests/pytest/query/queryTscomputWithNow.py new file mode 100644 index 0000000000000000000000000000000000000000..3b808d551c374a4af654beec5d9c386745385e2b --- /dev/null +++ b/tests/pytest/query/queryTscomputWithNow.py @@ -0,0 +1,177 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def insertnow(self): + tsp1 = 0 + tsp2 = -28800000 + tsp3 = -946800000000 + + tdSql.execute( + "create table stbts (ts timestamp, ts1 timestamp, c1 int, ts2 timestamp) TAGS(t1 int)" + ) + tdSql.execute("create table tts1 using stbts tags(1)") + + tdSql.execute("insert into tts1 values (now+1d, now+1d, 6, now+1d)") + tdSql.execute("insert into tts1 values (now, now, 5, now)") + tdSql.execute("insert into tts1 values (now-1d, now-1d, 4, now-1d)") + tdSql.execute(f"insert into tts1 values ({tsp1}, {tsp1}, 3, {tsp1})") + tdSql.execute(f"insert into tts1 values ({tsp2}, {tsp2}, 2, {tsp2})") + tdSql.execute(f"insert into tts1 values ({tsp3}, {tsp3}, 1, {tsp3})") + + def querynow(self): + interval_day1 = (datetime.date.today() - datetime.date(1970, 1, 1)).days + interval_day2 = (datetime.date.today() - datetime.date(1940, 1, 1)).days + + tdLog.printNoPrefix("==========step query: execute query operation") + time.sleep(1) + tdSql.execute(" select * from tts1 where ts > now+1d ") + ts_len1 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts < now+1d ") + ts_len2 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts > now-1d ") + ts_len3 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts < now-1d ") + ts_len4 = len(tdSql.cursor.fetchall())
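+ # NOTE: interval_day1/interval_day2 are the day counts from 1970-01-01 and 1940-01-01 up to today (for example, + # on 2021-04-01 interval_day1 is 18718), so "now-(interval_day1+1)d" falls just before the Unix epoch and + # "now-(interval_day1-1)d" just after it; the same bracketing is applied around 1940-01-01 below, and the row + # counts taken from the primary key column are then compared with identical filters on the ordinary timestamp + # columns ts1 (in days) and ts2 (in minutes, 1d = 1440m). + tdSql.execute(f" select * from tts1 where ts 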
> now-{interval_day1+1}d ") + ts_len5 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day1+1}d ") + ts_len6 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day1-1}d ") + ts_len7 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day1-1}d ") + ts_len8 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day2+1}d ") + ts_len9 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day2+1}d ") + ts_len10 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day2-1}d ") + ts_len11 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day2-1}d ") + ts_len12 = len(tdSql.cursor.fetchall()) + + tdSql.query(" select * from tts1 where ts1 > now+1d ") + tdSql.checkRows(ts_len1) + tdSql.query(" select * from tts1 where ts2 > now+1440m ") + tdSql.checkRows(ts_len1) + + tdSql.query(" select * from tts1 where ts1 < now+1d ") + tdSql.checkRows(ts_len2) + tdSql.query(" select * from tts1 where ts2 < now+1440m ") + tdSql.checkRows(ts_len2) + + tdSql.query(" select * from tts1 where ts1 > now-1d ") + tdSql.checkRows(ts_len3) + tdSql.query(" select * from tts1 where ts2 > now-1440m ") + tdSql.checkRows(ts_len3) + + tdSql.query(" select * from tts1 where ts1 < now-1d ") + tdSql.checkRows(ts_len4) + tdSql.query(" select * from tts1 where ts2 < now-1440m ") + tdSql.checkRows(ts_len4) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day1+1}d ") + tdSql.checkRows(ts_len5) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1+1)*1440}m " ) + tdSql.checkRows(ts_len5) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1+1}d ") + tdSql.checkRows(ts_len6) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1+1)*1440}m ") + tdSql.checkRows(ts_len6) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day1-1}d ") + tdSql.checkRows(ts_len7) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1-1)*1440}m ") + tdSql.checkRows(ts_len7) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1-1}d ") + tdSql.checkRows(ts_len8) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1-1)*1440}m ") + tdSql.checkRows(ts_len8) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 + 1}d ") + tdSql.checkRows(ts_len9) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 + 1)*1440}m ") + tdSql.checkRows(ts_len9) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 + 1}d ") + tdSql.checkRows(ts_len10) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 + 1)*1440}m ") + tdSql.checkRows(ts_len10) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 - 1}d ") + tdSql.checkRows(ts_len11) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 - 1)*1440}m ") + tdSql.checkRows(ts_len11) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 - 1}d ") + tdSql.checkRows(ts_len12) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 - 1)*1440}m ") + tdSql.checkRows(ts_len12) + + + + def run(self): + tdSql.execute("drop database if exists dbms") + tdSql.execute("drop database if exists dbus") + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 
-> "1950-01-01 00:00:00" + + tdLog.printNoPrefix("==========step1:create table precision ms && insert data && query") + # create databases precision is ms + tdSql.execute("create database if not exists dbms keep 36500") + tdSql.execute("use dbms") + self.inertnow() + self.querynow() + + tdLog.printNoPrefix("==========step2:create table precision us && insert data && query") + # create databases precision is us + tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ") + tdSql.execute("use dbus") + self.inertnow() + self.querynow() + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.printNoPrefix("==========step3:after wal, query table precision ms") + tdSql.execute("use dbus") + self.querynow() + + tdLog.printNoPrefix("==========step4: query table precision us") + tdSql.execute("use dbus") + self.querynow() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/cqSupportBefore1970.py b/tests/pytest/stream/cqSupportBefore1970.py new file mode 100644 index 0000000000000000000000000000000000000000..75587d1743c7b57b6d53bb43c3c04bdc52e93aad --- /dev/null +++ b/tests/pytest/stream/cqSupportBefore1970.py @@ -0,0 +1,93 @@ +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def insertnow(self): + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + + tsp1 = 0 + tsp2 = -28800000 + tsp3 = -946800000000 + tsp4 = "1969-01-01 00:00:00.000" + + tdSql.execute("insert into tcq1 values (now-11d, 5)") + tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)") + tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)") + tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)") + tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)") + + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds") + try: + for i in range(timeout): + tdSql.cursor.execute(sql) + self.queryResult = tdSql.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(tdSql.cursor.description) + # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}") + raise Exception(repr(e)) + return (self.queryRows, timeout) + + def cq(self): + tdSql.execute( + "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)" + ) + self.waitedQuery("select * from cq1", 1, 120) + + def querycq(self): + tdSql.query("select * from cq1") + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(10, 
1, 2.0) + + def run(self): + tdSql.execute("drop database if exists dbcq") + tdSql.execute("create database if not exists dbcq keep 36500") + tdSql.execute("use dbcq") + + tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)") + tdSql.execute("create table tcq1 using stbcq tags(1)") + + self.insertnow() + self.cq() + self.querycq() + + # after wal and sync, check again + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.querycq() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py index 70f300e937b2ed422231a774d2bca1183ce6fd0a..4a0e47c01ad9f9aac7ed78be0ff4fc93fc0d41ed 100644 --- a/tests/pytest/stream/new.py +++ b/tests/pytest/stream/new.py @@ -42,7 +42,7 @@ class TDTestCase: tdLog.info("=============== step3") start = time.time() - tdSql.waitedQuery("select * from st", 1, 120) + tdSql.waitedQuery("select * from st", 1, 180) delay = int(time.time() - start) + 80 v = tdSql.getData(0, 3) if v >= 51: diff --git a/tests/pytest/stream/showStreamExecTimeisNull.py b/tests/pytest/stream/showStreamExecTimeisNull.py new file mode 100644 index 0000000000000000000000000000000000000000..39b025901856c1de6c21a1889d97a4b599c53de4 --- /dev/null +++ b/tests/pytest/stream/showStreamExecTimeisNull.py @@ -0,0 +1,97 @@ +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def insertnow(self): + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + + tsp1 = 0 + tsp2 = -28800000 + tsp3 = -946800000000 + tsp4 = "1969-01-01 00:00:00.000" + + tdSql.execute("insert into tcq1 values (now-11d, 5)") + tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)") + tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)") + tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)") + tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)") + + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds") + try: + for i in range(timeout): + tdSql.cursor.execute(sql) + self.queryResult = tdSql.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(tdSql.cursor.description) + # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}") + raise Exception(repr(e)) + return (self.queryRows, timeout) + + def showstream(self): + tdSql.execute( + "create table cq1 as select avg(c1) from tcq1 interval(10d) sliding(1d)" + ) + sql = "show streams" + timeout = 30 + exception = 
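"ValueError('year -292275055 is out of range')" + # NOTE: for a stream whose exec time has never been set, "show streams" appears to return a placeholder timestamp + # that the Python connector cannot convert to a datetime, which surfaces as the ValueError above; the loop below + # therefore treats that exception as the expected outcome and fails the case via tdLog.exit() only when no + # exception is raised. + exception = 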
"ValueError('year -292275055 is out of range')" + try: + for i in range(timeout): + tdSql.cursor.execute(sql) + self.queryResult = tdSql.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(tdSql.cursor.description) + # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= timeout: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + tdLog.info(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ") + else: + tdLog.exit(f"sql: {sql} except raise {exception}, actually not") + + + def run(self): + tdSql.execute("drop database if exists dbcq") + tdSql.execute("create database if not exists dbcq keep 36500") + tdSql.execute("use dbcq") + + tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)") + tdSql.execute("create table tcq1 using stbcq tags(1)") + + self.insertnow() + self.showstream() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index d71742048a45774fbe6909093840f265b829eb4c..9b4eb8725c96f95196f251c55b0b773cd68e9ed5 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -88,6 +88,8 @@ class TDTestCase: except Exception as e: tdLog.info(repr(e)) + + time.sleep(5) tdSql.query("show streams") tdSql.checkRows(1) tdSql.checkData(0, 2, 's0') @@ -146,6 +148,7 @@ class TDTestCase: except Exception as e: tdLog.info(repr(e)) + time.sleep(5) tdSql.query("show streams") tdSql.checkRows(2) tdSql.checkData(0, 2, 's1') diff --git a/tests/pytest/stream/sys.py b/tests/pytest/stream/sys.py index a73e7043e8c65b2eb9c78fbcb99d4e546ddf9ae4..c9a3fccfe68b61da722dcdb2ccab63bf3d5bcabc 100644 --- a/tests/pytest/stream/sys.py +++ b/tests/pytest/stream/sys.py @@ -47,7 +47,7 @@ class TDTestCase: "select * from iostrm", ] for sql in sqls: - (rows, _) = tdSql.waitedQuery(sql, 1, 120) + (rows, _) = tdSql.waitedQuery(sql, 1, 240) if rows < 1: tdLog.exit("failed: sql:%s, expect at least one row" % sql) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json new file mode 100644 index 0000000000000000000000000000000000000000..5e53bd7e7d10edea9bdbc56ef9ab737dbb34684e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 60, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": -1, + "childtable_offset":0, + 
"multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py new file mode 100644 index 0000000000000000000000000000000000000000..1401716da9095b44aa47e9ecb2e7131bc0a8b9ea --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py @@ -0,0 +1,89 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import subprocess +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # # insert 1000w rows in stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0,60) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 6000000) + os.system('%staosdemo -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath) + time.sleep(2) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "TD-3453/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_1 > 10.0 : + os.system("kill -9 %d" % query_pid) + time.sleep(5) + taosd_cpu_load_2 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_2 < 10.0 : + suc_kill = 60 + else: + suc_kill = 10 + print("taosd_cpu_load is higher than 10%") + else: + suc_kill = 20 + print("taosd_cpu_load is still less than 
10%") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, "%d" % suc_kill) + os.system("rm -rf querySystemInfo*") + os.system("rm -rf insert_res.txt") + os.system("rm -rf insert_res.txt") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json new file mode 100644 index 0000000000000000000000000000000000000000..a92906fa730833108ad758d3fc53c954279abe38 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json @@ -0,0 +1,20 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "specified_table_query":{ + "query_interval":1, + "concurrent":1, + "sqls":[ + { + "sql": "select * from stb0", + "result": "" + } + ] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/convertResFile.py b/tests/pytest/tools/taosdemoAllTest/convertResFile.py new file mode 100644 index 0000000000000000000000000000000000000000..52bb8f40d0f0a5a55450ecb4927067f37f862499 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/convertResFile.py @@ -0,0 +1,35 @@ +from datetime import datetime +import time +import os + +os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt") +with open('./new_query_res0.txt','r+') as f0: + contents = f0.readlines() + if os.path.exists('./test_query_res0.txt'): + os.system("rm -rf ./test_query_res0.txt") + for i in range(len(contents)): + content = contents[i].rstrip('\n') + stimestamp = content.split(',')[0] + timestamp = int(stimestamp) + d = datetime.fromtimestamp(timestamp/1000) + str0 = d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + ts = "'"+str0+"'" + str1 = "'"+content.split(',')[1]+"'" + str2 = "'"+content.split(',')[2]+"'" + content = ts + "," + str1 + "," + str2 + "," + content.split(',',3)[3] + contents[i] = content + "\n" + with open('./test_query_res0.txt','a') as fi: + fi.write(contents[i]) + +os.system("rm -rf ./new_query_res0.txt") + + + + + + + +# timestamp = 1604160000099 +# d = datetime.fromtimestamp(timestamp/1000) +# str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f") +# print(str1[:-3]) diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json new file mode 100644 index 0000000000000000000000000000000000000000..8e40ad812d0ad0a9e243fc2e371a9be35fa0f7f2 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb00_", + 
"auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":4}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json new file mode 100644 index 0000000000000000000000000000000000000000..e741fd5c056c329a0a785b050dc2e94dd2dfd9ca --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": 
"no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json new file mode 100644 index 0000000000000000000000000000000000000000..fddaa4b4b99d441fb81fcd350ad2d6a4279f9c4e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 100, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 100, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json new file mode 100644 index 0000000000000000000000000000000000000000..f6a103f001f312514b3c17df481f6c0daa35c410 --- /dev/null +++ 
b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json new file mode 100644 index 0000000000000000000000000000000000000000..17050278c8825fb4af9bbf8ed6edcc063a746007 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", 
+ "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1024}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json new file mode 100644 index 0000000000000000000000000000000000000000..53735dc41342cde1cc9cb0a3376be07629464531 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + 
"child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json new file mode 100644 index 0000000000000000000000000000000000000000..115c42b502951007aafa1b9c72de45bfcbb3f9fc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], 
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json new file mode 100644 index 0000000000000000000000000000000000000000..26e8b7e88dabecade8dd4f983976347380ea3830 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 150, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 151, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json new file mode 100644 index 0000000000000000000000000000000000000000..c7c5150a060ff76221025def1d7c31194c52dfed --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 100, + "interlace_rows": 0, + "num_of_records_per_req": 2000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + 
"sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval": 2000, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":9}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json new file mode 100644 index 0000000000000000000000000000000000000000..72e380a66cb3cfd2b3bade57f000bbebbf29baf4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": 
"./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json new file mode 100644 index 0000000000000000000000000000000000000000..3115c9ba72692cd7c5d72de030cc7d9110f8c054 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", 
+ "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, 
{"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 30, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json new file mode 100644 index 0000000000000000000000000000000000000000..7fdba4add14e8f91bfe516366b8c936c133f5546 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbno", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-offset.json b/tests/pytest/tools/taosdemoAllTest/insert-offset.json new file mode 100644 index 0000000000000000000000000000000000000000..611b4a898975ec1a0b6f528e47961e0bccacd7af --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-offset.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, 
+ "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"yes", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset":7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { 
+ "name": "stb4", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json new file mode 100644 index 0000000000000000000000000000000000000000..72e380a66cb3cfd2b3bade57f000bbebbf29baf4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + 
"child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample.json b/tests/pytest/tools/taosdemoAllTest/insert-sample.json new file mode 100644 index 0000000000000000000000000000000000000000..015993227e60123581e4546b0544945f6962921c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-sample.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbtest123", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", 
+ "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "./tools/taosdemoAllTest/tags.csv", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json new file mode 100644 index 0000000000000000000000000000000000000000..01d8ac90982b762a2c51edb55db9760f4c7e6f4f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + 
}, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py new file mode 100644 index 0000000000000000000000000000000000000000..703f755c31c7b325e34b93878e2e3175648834ef --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: drop and child_table_exists combination test + # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath) + sleep(15) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 1000000) + + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) 
+tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json new file mode 100644 index 0000000000000000000000000000000000000000..ad6cb8118da9f8f37041778e7ea6dfbcbc9f6b29 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json @@ -0,0 +1,62 @@ + +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 1, + "childtable_offset": 99, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json new file mode 100644 index 0000000000000000000000000000000000000000..7109dab53f78783c1d624210a85aec31fbcf1507 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json @@ -0,0 +1,62 @@ + +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 5, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": 
"BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json new file mode 100644 index 0000000000000000000000000000000000000000..a98a185b54464aedddd85d5ea4834d6107dd216b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json @@ -0,0 +1,62 @@ + +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 94, + "childtable_offset": 5, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json new file mode 100644 index 0000000000000000000000000000000000000000..e2f3fb037969901cc25e474302cdeee9a08163c0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json @@ -0,0 +1,61 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": 
"csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sample.csv b/tests/pytest/tools/taosdemoAllTest/sample.csv new file mode 100644 index 0000000000000000000000000000000000000000..471118a2ce9466bdb629434a32407c9616de9e3e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sample.csv @@ -0,0 +1,3 @@ +1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true' +0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true' +0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false' \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/speciQuery.json b/tests/pytest/tools/taosdemoAllTest/speciQuery.json new file mode 100644 index 0000000000000000000000000000000000000000..5e99e80108d434da5b0010394e2aca97d58ea7a7 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/speciQuery.json @@ -0,0 +1,36 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "specified_table_query": { + "query_interval": 1, + "concurrent": 3, + "sqls": [ + { + "sql": "select last_row(*) from stb0 ", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb00_1", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json new file mode 100644 index 0000000000000000000000000000000000000000..ec9cb5a40da9a85928dcc7aea4abb6c96ab36976 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": 
"INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/tags.csv b/tests/pytest/tools/taosdemoAllTest/tags.csv new file mode 100644 index 0000000000000000000000000000000000000000..89bf8e3fb34a9498601a72bfc2779d93d9ab7a91 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/tags.csv @@ -0,0 +1,2 @@ +1,-127,127,'23ac,;\[uer]3','true' +1,-127,126,'23ac,;\[uer]3','true' diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd50bf63906f8e74ad9fffc6518d40ac017939a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -0,0 +1,229 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath+ "/build/bin/"
+
+        # insert: create one or multiple tables per sql and insert multiple rows per sql
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 1000)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 1000)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 100)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 100000)
+        tdSql.query("select count(*) from stb01_1")
+        tdSql.checkData(0, 0, 200)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 200000)
+
+
+        # insert: create multiple tables per sql and insert one row per sql
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 20)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 10000)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 100000)
+        tdSql.query("select count(*) from stb01_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 400000)
+
+        # insert: use the "insert_interval" parameter to control the insert speed.
+        # We still need a more accurate way to verify the speed, such as reading the
+        # actual rate and checking the row count against it.
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("show stables")
+        tdSql.checkData(0, 4, 100)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 2000000)
+        tdSql.query("show stables")
+        tdSql.checkData(1, 4, 100)
+        tdSql.query("select count(*) from stb01_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 2000000)
+
+        # the 3 test cases above take about 2min30s.
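+        # Editor's sketch (illustrative, not part of the original test): the expected
+        # counts checked above follow directly from insert-interval-speed.json, i.e.
+        # childtable_count * insert_rows rows per super table. The variable names
+        # below are hypothetical and only document that arithmetic.
+        expected_rows_per_child = 20000                            # insert_rows in the config
+        expected_rows_per_stable = 100 * expected_rows_per_child   # childtable_count * insert_rows
+        assert expected_rows_per_stable == 2000000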
+        # insert: drop and child_table_exists combination test
+        # insert: use the "childtable_offset" and "childtable_limit" parameters to
+        # control which child tables (offset and count) are written
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath)
+        tdSql.error("show dbno.stables")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 5)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 6)
+        tdSql.query("select count (tbname) from stb2")
+        tdSql.checkData(0, 0, 7)
+        tdSql.query("select count (tbname) from stb3")
+        tdSql.checkData(0, 0, 8)
+        tdSql.query("select count (tbname) from stb4")
+        tdSql.checkData(0, 0, 8)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 50)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 240)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 220)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 180)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 160)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 150)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 360)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 360)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 340)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 400)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 50)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 120)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 140)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 160)
+
+
+        # insert: illegal parameters in the json file; a proper way to assert the
+        # expected failure is still needed.
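+        # Editor's sketch (hypothetical helper, not part of the original test): one way
+        # to detect the expected rejection is to check the process exit status instead
+        # of calling os.system and ignoring its return value.
+        def run_taosdemo(cfg_path):
+            import subprocess
+            # returns the taosdemo exit code; non-zero is expected for an illegal config
+            return subprocess.call("%staosdemo -f %s -y" % (binPath, cfg_path), shell=True)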
+        tdSql.execute("drop database if exists db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns.json -y " % binPath)
+        tdSql.error("use db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-lmax.json -y " % binPath)
+        tdSql.error("select * from db.stb0")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-count-0.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from db.stb0")
+        tdSql.checkData(0, 0, 10000)
+        tdSql.execute("drop database if exists db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-tags-count129.json -y " % binPath)
+        tdSql.error("use db1")
+
+        # insert: timestamp and step
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("show stables")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 20)
+        tdSql.query("select last(ts) from db.stb00_0")
+        tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 200)
+        tdSql.query("select last(ts) from db.stb01_0")
+        tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000")
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 400)
+
+        # insert: disorder_ratio
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 10)
+
+        # insert: sample json
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath)
+        tdSql.execute("use dbtest123")
+        tdSql.query("select col2 from stb0")
+        tdSql.checkData(0, 0, 2147483647)
+        tdSql.query("select t1 from stb1")
+        tdSql.checkData(0, 0, -127)
+        tdSql.query("select t2 from stb1")
+        tdSql.checkData(1, 0, 126)
+
+        # insert: test interlace parameter
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 100)
+        tdSql.query("select count (*) from stb0")
+        tdSql.checkData(0, 0, 15000)
+
+
+        os.system("rm -rf ./insert_res.txt")
+        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")
+
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
new file mode 100644
index 0000000000000000000000000000000000000000..00b387e398bfba06efaeeaf1072905210b9ae99e
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
@@ -0,0 +1,91 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import time
+from datetime import datetime
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath+ "/build/bin/"
+
+        # query: insert data with speciQueryInsertdata.json first, then run the
+        # specified-table and super-table queries in speciQuery.json and check the
+        # saved result files
+        os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
+        os.system("%staosdemo -f tools/taosdemoAllTest/speciQuery.json" % binPath)
+        os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
+        os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
+        os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
+        tdSql.execute("use db")
+        tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
+        os.system("python3 tools/taosdemoAllTest/convertResFile.py")
+        tdSql.execute("insert into result0 file './test_query_res0.txt'")
+        tdSql.query("select ts from result0")
+        tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
+        tdSql.query("select count(*) from result0")
+        tdSql.checkData(0, 0, 1)
+        with open('./all_query_res1.txt','r+') as f1:
+            result1 = int(f1.readline())
+            tdSql.query("select count(*) from stb00_1")
+            tdSql.checkData(0, 0, "%d" % result1)
+
+        with open('./all_query_res2.txt','r+') as f2:
+            result2 = int(f2.readline())
+            d2 = datetime.fromtimestamp(result2/1000)
+            timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
+            tdSql.query("select last_row(ts) from stb1")
+            tdSql.checkData(0, 0, "%s" % timest)
+
+        os.system("rm -rf ./insert_res.txt")
+        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestQueryWithJson.py.sql")
+        os.system("rm -rf ./querySystemInfo*")
+        os.system("rm -rf ./query_res*")
+        os.system("rm -rf ./all_query*")
+        os.system("rm -rf ./test_query_res0.txt")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py
index bb367817cf082a4a4b75f2deac6883d72d7ba145..9aa131624e83e04df12b59b0b0318562098c77cb 100644
--- a/tests/pytest/tools/taosdemoTestTblAlt.py
+++ b/tests/pytest/tools/taosdemoTestTblAlt.py
@@ -100,8 +100,8 @@ class TDTestCase:
         print("alter table test.meters add column col10 int")
         tdSql.execute("alter table test.meters add column col10 int")
-
print("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") - tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + print("insert into test.t9 values (now, 1, 2, 3, 4, 0)") + tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0)") def run(self): tdSql.prepare() diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 16931cca333a3300b7d0d6831bbb51db0238b1d1..8f62c5932b71049e97375ef6c57ecb563d204844 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -87,6 +87,7 @@ class TDSql: self.queryResult = self.cursor.fetchall() self.queryRows = len(self.queryResult) self.queryCols = len(self.cursor.description) + tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) if self.queryRows >= expectRows: return (self.queryRows, i) time.sleep(1) @@ -105,6 +106,14 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + def checkCols(self, expectCols): + if self.queryCols == expectCols: + tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryCols, expectCols) + tdLog.exit("%s(%d) failed: sql:%s, queryCols:%d != expect:%d" % args) + def checkRowCol(self, row, col): caller = inspect.getframeinfo(inspect.stack()[2][0]) if row < 0: @@ -127,6 +136,11 @@ class TDSql: def checkData(self, row, col, data): self.checkRowCol(row, col) if self.queryResult[row][col] != data: + if self.cursor.istype(col, "TIMESTAMP") and self.queryResult[row][col] == datetime.datetime.fromisoformat(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (self.sql, row, col, self.queryResult[row][col], data)) + return + if str(self.queryResult[row][col]) == str(data): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) diff --git a/tests/script/general/parser/alter1.sim b/tests/script/general/parser/alter1.sim index a52202fc1abe044151f8af1903159bb1422e8b61..3b6b0d946599a642cd78b7fbc917d9b5e3d5ed50 100644 --- a/tests/script/general/parser/alter1.sim +++ b/tests/script/general/parser/alter1.sim @@ -129,8 +129,8 @@ if $rows != 3 then return -1 endi -sql drop database $db -sql show databases -if $rows != 0 then - return -1 -endi +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 2c56c6445fa8134ab28837940a11ff6f4127b7c7..191f56fcfb7d13caf1cb981f518e8bdd439c42b3 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -414,6 +414,7 @@ if $rows != 1 then endi if $data00 != 0.204545455 then + print expect 0.204545455, actual: $data00 return -1 endi diff --git a/tests/script/general/parser/first_last_query.sim b/tests/script/general/parser/first_last_query.sim index 2d08759f3fb0bdd78a3163e335334a49504ec861..2dff1dd51b4fe7d3646a013302b084f66b31669d 100644 --- a/tests/script/general/parser/first_last_query.sim +++ b/tests/script/general/parser/first_last_query.sim @@ -269,4 +269,50 @@ if $data14 != @test2@ then return -1 endi -sql drop table stest \ No newline at end of file +sql drop table stest + 
+print ===================>td-3779 +sql create table m1(ts timestamp, k int) tags(a int); +sql create table tm0 using m1 tags(1); +sql create table tm1 using m1 tags(2); +sql insert into tm0 values('2020-3-1 1:1:1', 112); +sql insert into tm1 values('2020-1-1 1:1:1', 1)('2020-3-1 0:1:1', 421); +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 1000 + +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed +sleep 1000 +sql connect +sql use first_db0; + +sql select last(*) from m1 group by tbname; +if $rows != 2 then + return -1 +endi + +if $data00 != @20-03-01 01:01:01.000@ then + return -1 +endi + +if $data01 != 112 then + return -1 +endi + +if $data02 != @tm0@ then + return -1 +endi + +if $data10 != @20-03-01 00:01:01.000@ then + return -1 +endi + +if $data11 != 421 then + return -1 +endi + +if $data12 != @tm1@ then + return -1 +endi + +sql drop table m1 \ No newline at end of file diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim index 2e95664d31fd728a6cba3f775e3b9f07b6249ce0..ddafdd73293d75bc99380969d98c7fb986420a38 100644 --- a/tests/script/general/parser/having.sim +++ b/tests/script/general/parser/having.sim @@ -141,9 +141,7 @@ if $data30 != 4 then endi sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; - sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; - sql_error select top(f1,2) from st2 group by f1 having avg(f1) > 0; sql select avg(f1),count(f1) from st2 group by f1 having avg(f1) > 2; diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim index 1f29a63b9100c799deb3937dc317c4bcb5326aea..a38db3fe44e8857ba646128a856371468d723b2b 100644 --- a/tests/script/general/parser/having_child.sim +++ b/tests/script/general/parser/having_child.sim @@ -1,6 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start @@ -1460,8 +1460,7 @@ if $data03 != 0.000000000 then return -1 endi -sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) < 0 and avg(f1) = 3; - +#sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) < 0 and avg(f1) = 3; sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2; sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0; diff --git a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index 2322496a94467d44e3fdc5eabe29dded23e7dbde..c72b2c5b3e4018c892f1194671b0d577c053c7f5 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -142,7 +142,7 @@ if $rows != 300 then endi if $data00 != @70-01-01 08:01:40.990@ then - print expect 0, actual: $data00 + print expect 70-01-01 08:01:40.990, actual: $data00 return -1 endi diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim index 9fe287960d22642bbea0139246d3f90537fef628..c35fd369ca33eb7158ed3223a40f37c522702bc5 100644 --- a/tests/script/general/parser/limit2_query.sim +++ b/tests/script/general/parser/limit2_query.sim @@ -148,6 +148,9 @@ if $rows != 8200 then return -1 endi +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 100000; 
+ + sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 8190; if $rows != 10 then return -1 diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index da8e8765773bc6cf8a4ae2b1b0b1a6bb38a1194a..7e5f31f7594bdde5e068e0d754ae863662696759 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -26,7 +26,7 @@ sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db -sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12), t3 int) $i = 0 $j = 1 @@ -36,7 +36,7 @@ while $i < $tbNum $tg2 = ' . abc $tg2 = $tg2 . $i $tg2 = $tg2 . ' - sql create table $tb using $mt tags( $i , $tg2 ) + sql create table $tb using $mt tags( $i , $tg2 , 123 ) $x = 0 while $x < $rowNum @@ -85,6 +85,7 @@ if $data00 != @70-01-01 08:01:40.000@ then endi if $data01 != @select_tags_tb0@ then + print expect: select_tags_tb0, actual: $data01 return -1 endi @@ -160,7 +161,7 @@ if $data03 != @abc15@ then endi sql select top(c6, 3) from select_tags_mt0 interval(10a) -sql select top(c3,10) from select_tags_mt0 interval(10a) group by tbname +sql select top(c3,10) from select_tags_mt0 interval(10a) group by tbname,t1,t2 sql select top(c6, 3) from select_tags_mt0 interval(10a) group by tbname; sql select top(c6, 10) from select_tags_mt0 interval(10a); @@ -813,7 +814,46 @@ if $row != 1 then return -1 endi -print ======= selectivity + tags+ group by + tags + filter + interval + join=========== +print TODO ======= selectivity + tags+ group by + tags + filter + interval + join=========== + +print ==========================mix tag columns and group by columns====================== +sql select top(c1, 100), tbname from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by t3 +if $rows != 100 then + return -1 +endi + +if $data00 != @70-01-01 08:01:40.094@ then + print expect: 70-01-01 08:01:40.094, actual: $data00 + return -1 +endi + +if $data01 != 94 then + return -1 +endi + +if $data02 != @select_tags_tb0@ then + return -1 +endi + +if $data03 != 123 then + return -1 +endi + +if $data10 != @70-01-01 08:01:40.095@ then + return -1 +endi + +if $data11 != 95 then + return -1 +endi + +if $data12 != @select_tags_tb0@ then + return -1 +endi + +if $data13 != 123 then + return -1 +endi print ======error sql============================================= diff --git a/tests/script/general/parser/subInfrom.sim b/tests/script/general/parser/subInfrom.sim new file mode 100644 index 0000000000000000000000000000000000000000..e47831ee8797e3a9a09ee933c7286740120623e6 --- /dev/null +++ b/tests/script/general/parser/subInfrom.sim @@ -0,0 +1,147 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 100 +sql connect +sleep 100 + +print ========== sub_in_from.sim +$i = 0 + +$dbPrefix = subdb +$tbPrefix = sub_tb +$stbPrefix = sub_stb +$tbNum = 10 +$rowNum = 1000 +$totalNum = $tbNum * $rowNum +$loops = 200000 +$log = 10000 +$ts0 = 1537146000000 +$delta = 600000 +$i = 0 +$db = $dbPrefix . $i +$stb = $stbPrefix . 
$i + +sql drop database $db -x step1 +step1: +sql create database $db cache 16 maxrows 4096 keep 36500 +print ====== create tables +sql use $db +sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) + +$i = 0 +$ts = $ts0 +$halfNum = $tbNum / 2 +while $i < $halfNum + $tbId = $i + $halfNum + $tb = $tbPrefix . $i + $tb1 = $tbPrefix . $tbId + sql create table $tb using $stb tags( $i ) + sql create table $tb1 using $stb tags( $tbId ) + + $x = 0 + while $x < $rowNum + $xs = $x * $delta + $ts = $ts0 + $xs + $c = $x / 10 + $c = $c * 10 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) + sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 +endw +print ====== tables created + +sql_error select count(*) from (select count(*) from abc.sub_stb0) +sql_error select val + 20 from (select count(*) from sub_stb0 interval(10h)) +sql_error select abc+20 from (select count(*) from sub_stb0 interval(1s)) + +sql select count(*) from (select count(*) from sub_stb0 interval(10h)) +if $rows != 1 then + return -1 +endi + +if $data00 != 18 then + print expect 18, actual: $data00 + return -1 +endi + +sql select ts from (select count(*) from sub_stb0 interval(10h)) +if $rows != 18 then + return -1 +endi + +if $data00 != @18-09-17 04:00:00.000@ then + return -1 +endi + +if $data01 != @18-09-17 14:00:00.000@ then + return -1 +endi + +sql select val + 20, val from (select count(*) as val from sub_stb0 interval(10h)) +if $rows != 18 then + return -1 +endi + +if $data00 != 320.000000 then + return -1 +endi + +if $data01 != 300 then + return -1 +endi + +if $data10 != 620 then + return -1 +endi + +if $data11 != 600 then + return -1 +endi + +if $data20 != 620 then + return -1 +endi + +if $data21 != 600 then + return -1 +endi + +sql select max(val), min(val), max(val) - min(val) from (select count(*) val from sub_stb0 interval(10h)) +if $rows != 1 then + return -1 +endi + +if $data00 != 600 then + return -1 +endi + +if $data01 != 100 then + return -1 +endi + +if $data02 != 500.000000 then + return -1 +endi + +sql select first(ts,val),last(ts,val) from (select count(*) val from sub_stb0 interval(10h)) +sql select top(val, 5) from (select count(*) val from sub_stb0 interval(10h)) +sql select diff(val) from (select count(*) val from sub_stb0 interval(10h)) +sql select apercentile(val, 50) from (select count(*) val from sub_stb0 interval(10h)) + +# not support yet +sql select percentile(val, 50) from (select count(*) val from sub_stb0 interval(10h)) +sql select stddev(val) from (select count(*) val from sub_stb0 interval(10h)) + +print ====================>complex query + diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index f05474d15804fba661f650d69632c454415eba64..62af4818e58b4466074c9d50d13e04bae8ec4ec2 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -17,7 +17,7 @@ run general/parser/first_last.sim run general/parser/import_commit1.sim run general/parser/import_commit2.sim run general/parser/import_commit3.sim -#run general/parser/import_file.sim +run general/parser/import_file.sim run general/parser/insert_tb.sim run general/parser/tags_dynamically_specifiy.sim run general/parser/interp.sim @@ 
-38,12 +38,12 @@ run general/parser/slimit.sim
 run general/parser/slimit1.sim
 run general/parser/slimit_alter_tags.sim
 run general/parser/tbnameIn.sim
-run general/parser/slimit_alter_tags.sim # persistent failed
 run general/parser/join.sim
 run general/parser/join_multivnode.sim
 run general/parser/join_manyblocks.sim
 run general/parser/projection_limit_offset.sim
 run general/parser/select_with_tags.sim
+run general/parser/select_distinct_tag.sim
 run general/parser/groupby.sim
 run general/parser/tags_filter.sim
 run general/parser/topbot.sim
@@ -54,5 +54,5 @@ run general/parser/timestamp.sim
 run general/parser/sliding.sim
 run general/parser/function.sim
 run general/parser/stableOp.sim
-run general/parser/slimit_alter_tags.sim
-
+run general/parser/having.sim
+run general/parser/having_child.sim
\ No newline at end of file
diff --git a/tests/script/general/stream/stream_1970.sim b/tests/script/general/stream/stream_1970.sim
new file mode 100644
index 0000000000000000000000000000000000000000..30a733c08ff37d193df688ee956f3c5911671ddf
--- /dev/null
+++ b/tests/script/general/stream/stream_1970.sim
@@ -0,0 +1,73 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+
+sleep 2000
+sql connect
+
+print ======================== dnode1 start
+
+$dbPrefix = s3_db
+$tbPrefix = s3_tb
+$mtPrefix = s3_mt
+$stPrefix = s3_st
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+$st = $stPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db keep 36500
+sql use $db
+sql create stable $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
+
+sql create table cq1 as select count(*) from $mt interval(10s);
+
+sleep 1000
+
+sql create table $st using $mt tags(1);
+
+sql insert into $st values (-50000, 1, 1.0);
+sql insert into $st values (-10000, 1, 1.0);
+sql insert into $st values (10000, 1, 1.0);
+
+
+$i = 0
+while $i < 12
+  sql select * from cq1;
+
+  if $rows != 3 then
+    sleep 10000
+  else
+    if $data00 != @70-01-01 07:59:10.000@ then
+      return -1
+    endi
+    if $data01 != 1 then
+      return -1
+    endi
+    if $data10 != @70-01-01 07:59:50.000@ then
+      return -1
+    endi
+    if $data11 != 1 then
+      return -1
+    endi
+    if $data20 != @70-01-01 08:00:10.000@ then
+      return -1
+    endi
+    if $data21 != 1 then
+      return -1
+    endi
+    $i = 12
+  endi
+
+  $i = $i + 1
+endw
+
diff --git a/tests/script/test.sh b/tests/script/test.sh
index 1c7a7527abc90b3acc98fdf481b847258dfda489..a092a38a2d9491b49b64c80a42c74ebdd41236f3 100755
--- a/tests/script/test.sh
+++ b/tests/script/test.sh
@@ -132,6 +132,7 @@ if [ -n "$FILE_NAME" ]; then
   else
     echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME
     $PROGRAM -c $CFG_DIR -f $FILE_NAME
+#    valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
   fi
 else
   echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f basicSuite.sim
diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim
index 026678af7c5f7d1dfb8924152e8c26ea6dcdb2ec..0b80acbe6c9fc11455df6023f66da7f057db2d09 100644
--- a/tests/script/unique/cluster/balance2.sim
+++ b/tests/script/unique/cluster/balance2.sim
@@ -338,10 +338,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
 print stop dnode1 and sleep 3000
 sleep 3000
 
-sql drop dnode $hostname1
-print drop dnode1 and sleep 9000
-sleep 9000 - sql show mnodes $dnode1Role = $data2_1 $dnode4Role = $data2_4 @@ -357,6 +353,25 @@ endi print ============================== step6.1 system sh/exec.sh -n dnode1 -s start +$x = 0 +step6.1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 $data4_1 + +if $data4_1 != ready then + goto step6.1 +endi + +sql drop dnode $hostname1 +print drop dnode1 and sleep 9000 +sleep 9000 + $x = 0 show6: $x = $x + 1 diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim index 6f830d2cf8d50975a494854de9932fa74f41fb5c..25e0846129da4dfed1ffcafa9815d16848771ad7 100644 --- a/tests/script/unique/dnode/remove1.sim +++ b/tests/script/unique/dnode/remove1.sim @@ -97,7 +97,6 @@ if $data2_2 != 3 then endi print ========== step3 -sql drop dnode $hostname2 $x = 0 show3: @@ -114,6 +113,7 @@ print dnode2 openVnodes $data2_2 print ========== step4 sql create dnode $hostname3 system sh/exec.sh -n dnode3 -s start +sql drop dnode $hostname2 $x = 0 show4: @@ -224,4 +224,4 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT system sh/exec.sh -n dnode6 -s stop -x SIGINT system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim index ff92ff7eb05e87cc88a76af2d40502b36f0d0e4f..1d707bc4a319ce0dbc5bd66b9cff52318c25aa8d 100644 --- a/tests/script/unique/dnode/remove2.sim +++ b/tests/script/unique/dnode/remove2.sim @@ -98,7 +98,6 @@ endi print ========== step3 system sh/exec.sh -n dnode2 -s stop -x SIGINT -sql drop dnode $hostname2 sql show dnodes print dnode1 openVnodes $data2_1 @@ -128,6 +127,26 @@ endi print ============ step 4.1 system sh/exec.sh -n dnode2 -s start +$x = 0 +step4.1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 $data4_1 +print dnode2 $data4_2 +print dnode3 $data4_3 +print dnode4 $data4_4 + +if $data4_2 != ready then + goto step4.1 +endi + +sql drop dnode $hostname2 + $x = 0 show4: $x = $x + 1 diff --git a/tests/test-all.sh b/tests/test-all.sh index 980629e6d29b367927221236acf134957a6cf245..47e5de6aa0bd9821f6c30ba0dce6c03952f0a8a6 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -516,6 +516,15 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != echo "asyncdemo pass" totalExamplePass=`expr $totalExamplePass + 1` fi + + ./demo 127.0.0.1 > /dev/null 2>&1 + if [ $? != "0" ]; then + echo "demo failed" + totalExampleFailed=`expr $totalExampleFailed + 1` + else + echo "demo pass" + totalExamplePass=`expr $totalExamplePass + 1` + fi if [ "$totalExamplePass" -gt "0" ]; then echo -e "\n${GREEN} ### Total $totalExamplePass examples succeed! ### ${NC}" diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index d2494eddbbbb1756e4daf63e7532fdb466a4824e..0879e371ef62fee81786728e2b980442567fbaa1 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -164,10 +164,9 @@ void *simExecuteScript(void *inputScript) { } if (script->killed || script->linePos >= script->numOfLines) { - printf("killed ---------------------->\n"); script = simProcessCallOver(script); if (script == NULL) { - printf("abort now!\n"); + simDebug("sim test abort now!"); break; } } else {