main.go 25.0 KB
Newer Older
X
xieyinglin 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
package main

import (
	"bufio"
	"bytes"
	"database/sql"
	"encoding/json"
	"flag"
	"fmt"
	"github.com/taosdata/TDengine/importSampleData/import"
	"hash/crc32"
	"io"
	"log"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	_ "github.com/taosdata/TDengine/src/connector/go/src/taosSql"
)

const (
	TIMESTAMP  = "timestamp"
	DATETIME  = "datetime"
	MILLISECOND  = "millisecond"
	DEFAULT_STARTTIME int64 = -1
	DEFAULT_INTERVAL int64 = 1*1000

	JSON_FORMAT = "json"
	CSV_FORMAT = "csv"
	SUPERTABLE_PREFIX = "s_"
	SUBTABLE_PREFIX = "t_"

	DRIVER_NAME = "taosSql"
	STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
	INSERT_PREFIX = "insert into "
)

var (

	cfg string
	cases   string
	hnum    int
	vnum  int
	thread  int
	batch  int
	auto		int
	starttimestr  string
	interval	int64
	host       string
	port       int
	user       string
	password   string
	dropdb   int
	db         string
	dbparam		string

	dataSourceName string
	startTime  int64

	superTableConfigMap  = make(map[string]*superTableConfig)
	subTableMap	= make(map[string]*dataRows)
	scaleTableNames []string

	scaleTableMap = make(map[string]*scaleTableInfo)

    totalSuccessRows int64
	delay int64  // default 10 milliseconds
)

type superTableConfig struct {
	startTime int64
	endTime int64
	cycleTime int64
	avgInterval int64
	config dataimport.CaseConfig
}

type scaleTableInfo struct {
	scaleTableName string
	subTableName string
	insertRows int64
}

type tableRows struct {
	tableName string  // tableName
	value string      // values(...)
}

type dataRows struct {
	rows         []map[string]interface{}
	config       dataimport.CaseConfig
}

func (rows dataRows) Len() int {
	return len(rows.rows)
}

func (rows dataRows) Less(i, j int) bool {
	itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
	jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
	return itime < jtime
}

func (rows dataRows) Swap(i, j int) {
	rows.rows[i], rows.rows[j] = rows.rows[j], rows.rows[i]
}

func getPrimaryKey(value interface{}) int64 {
	val, _ := value.(int64)
	//time, _ := strconv.ParseInt(str, 10, 64)
	return val
}

func init() {
	parseArg() //parse argument

	if db == "" {
		//db = "go"
		db = fmt.Sprintf("test_%s",time.Now().Format("20060102"))
	}

	if auto == 1 && len(starttimestr) == 0  {
		log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
	}

	if len(starttimestr) != 0 {
		t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local)
		if err != nil {
			log.Fatalf("param startTime %s error, %s\n", starttimestr, err)
		}

		startTime = t.UnixNano() / 1e6 // as millisecond
	}else{
		startTime = DEFAULT_STARTTIME
	}

	dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)

	printArg()

	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
}

func main() {

	importConfig := dataimport.LoadConfig(cfg)

	for _, userCase := range strings.Split(cases, ",") {
		caseConfig, ok := importConfig.UserCases[userCase]

		if !ok {
			log.Println("not exist case: ", userCase)
			continue
		}

		checkUserCaseConfig(userCase, &caseConfig)

		//read file as map array
		fileRows := readFile(caseConfig)
		log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))

		if len(fileRows.rows) == 0 {
			log.Printf("there is no valid line in file %s\n", caseConfig.FilePath)
			continue
		}

		_, exists := superTableConfigMap[caseConfig.Stname]
		if !exists {
			superTableConfigMap[caseConfig.Stname] = &superTableConfig{config:caseConfig}
		} else {
			log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
		}

		var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)

		// set super table's startTime, cycleTime and avgInterval
		superTableConfigMap[caseConfig.Stname].startTime = start
		superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval
		superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime

		startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT)
		log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
	}

	superTableNum := len(superTableConfigMap)
	if superTableNum == 0 {
		log.Fatalln("no valid file, exited")
	}

	start := time.Now()
	// create super table
	createSuperTable(superTableConfigMap)
	log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)

	//create sub table
	start = time.Now()
	createSubTable(subTableMap)
	log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)

	subTableNum := len(scaleTableMap)

	if subTableNum < thread {
		thread = subTableNum
	}

	filePerThread := subTableNum / thread
	leftFileNum := subTableNum % thread

	var wg  sync.WaitGroup

	start = time.Now()

	startIndex, endIndex := 0, filePerThread
	for i := 0; i < thread; i++ {
		// start thread
		if i < leftFileNum {
			endIndex++
		}
		wg.Add(1)

		go insertData(i, startIndex, endIndex, &wg)
		startIndex, endIndex = endIndex, endIndex+filePerThread
	}

	wg.Wait()

	usedTime :=  time.Since(start)

	log.Printf("finished insert %d rows, used %d ms, speed %d rows/s", totalSuccessRows, usedTime/1e6, totalSuccessRows * 1e9 / int64(usedTime))

	if vnum == 0 {
		// continue waiting for insert data
		wait :=  make(chan string)
		v := <- wait
		log.Printf("program receive %s, exited.\n", v)
	}

}

func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval int64){
	if auto == 1 {
		// use auto generate data time
		start = startTime
		avgInterval = interval
		maxTableRows := normalizationDataWithSameInterval(fileRows, avgInterval)
		cycleTime = maxTableRows * avgInterval + avgInterval

	} else {

		// use the sample data primary timestamp
		sort.Sort(fileRows)// sort the file data by the primarykey
		minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
		maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])

		start = minTime // default startTime use the minTime
		if DEFAULT_STARTTIME != startTime {
			start = startTime
		}

		tableNum := normalizationData(fileRows, minTime)

		if minTime == maxTime {
			avgInterval = interval
			cycleTime = tableNum * avgInterval + avgInterval
		}else{
			avgInterval = (maxTime - minTime) / int64(len(fileRows.rows)) * tableNum
			cycleTime = maxTime - minTime + avgInterval
		}
	
	}
	return
}


func createSubTable(subTableMaps map[string]*dataRows) {

	connection := getConnection()
	defer connection.Close()

	connection.Exec("use " + db)

	createTablePrefix := "create table if not exists "
	for subTableName := range subTableMaps {

		superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname)
		tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags

		buffers := bytes.Buffer{}
		// create table t using supertTable tags(...);
		for i := 0; i < hnum; i++ {
			tableName := getScaleSubTableName(subTableName, i)

			scaleTableMap[tableName] = &scaleTableInfo{
				subTableName:   subTableName,
				insertRows: 0,
			}
			scaleTableNames = append(scaleTableNames, tableName)

			buffers.WriteString(createTablePrefix)
			buffers.WriteString(tableName)
			buffers.WriteString(" using ")
			buffers.WriteString(superTableName)
			buffers.WriteString(" tags(")
			for _, tag := range subTableMaps[subTableName].config.Tags{
				tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
				buffers.WriteString("'" + tagValue + "'")
				buffers.WriteString(",")
			}
			buffers.Truncate(buffers.Len()-1)
			buffers.WriteString(")")

			createTableSql := buffers.String()
			buffers.Reset()

			//log.Printf("create table: %s\n", createTableSql)
			_, err := connection.Exec(createTableSql)
			if err != nil {
				log.Fatalf("create table error: %s\n", err)
			}
		}
	}
}

func createSuperTable(superTableConfigMap map[string]*superTableConfig) {

	connection := getConnection()
	defer connection.Close()

	if dropdb == 1 {
		dropDbSql := "drop database if exists " + db
		_, err := connection.Exec(dropDbSql) // drop database if exists
		if err != nil {
			log.Fatalf("drop database error: %s\n", err)
		}
		log.Printf("dropDb: %s\n", dropDbSql)
	}

	createDbSql := "create database if not exists " + db + " " + dbparam

	_, err := connection.Exec(createDbSql) // create database if not exists
	if err != nil {
		log.Fatalf("create database error: %s\n", err)
	}
	log.Printf("createDb: %s\n", createDbSql)

	connection.Exec("use " + db)

	prefix := "create table if not exists "
	var buffer bytes.Buffer
	//CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
	for key := range superTableConfigMap {

		buffer.WriteString(prefix)
		buffer.WriteString(getSuperTableName(key))
		buffer.WriteString("(")

		superTableConf := superTableConfigMap[key]

		buffer.WriteString(superTableConf.config.Timestamp)
		buffer.WriteString(" timestamp, ")

		for _, field := range superTableConf.config.Fields {
			buffer.WriteString(field.Name + " " + field.Type + ",")
		}

		buffer.Truncate(buffer.Len()-1)
		buffer.WriteString(") tags( ")

		for _, tag := range superTableConf.config.Tags {
			buffer.WriteString(tag.Name + " " + tag.Type + ",")
		}

		buffer.Truncate(buffer.Len()-1)
		buffer.WriteString(")")

		createSql := buffer.String()
		buffer.Reset()

		//log.Printf("supertable: %s\n", createSql)
		_, err = connection.Exec(createSql)
		if err != nil {
			log.Fatalf("create supertable error: %s\n", err)
		}
	}

}

func getScaleSubTableName(subTableName string, hnum int) string {
	if hnum == 0 {
		 return subTableName
	}
	return fmt.Sprintf(  "%s_%d", subTableName, hnum)
}

func getSuperTableName(stname string) string {
	return SUPERTABLE_PREFIX + stname
}


/**
* normalizationData , and return the num of subTables
 */
func normalizationData(fileRows dataRows, minTime int64) int64 {

	var tableNum int64 = 0
	for _, row := range fileRows.rows {
		// get subTableName
		tableValue := getSubTableNameValue(row[fileRows.config.SubTableName])
		if len(tableValue) == 0 {
			continue
		}

		row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime

		subTableName := getSubTableName(tableValue, fileRows.config.Stname)

		value, ok := subTableMap[subTableName]
		if !ok {
			subTableMap[subTableName] = &dataRows{
				rows:         []map[string]interface{}{row},
				config:       fileRows.config,
			}

			tableNum++
		}else{
			value.rows = append(value.rows, row)
		}
	}
	return tableNum
}

// return the maximum table rows
func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int64{
	// subTableMap
	currSubTableMap	:= make(map[string]*dataRows)
	for _, row := range fileRows.rows {
		// get subTableName
		tableValue := getSubTableNameValue(row[fileRows.config.SubTableName])
		if len(tableValue) == 0 {
			continue
		}

		subTableName := getSubTableName(tableValue, fileRows.config.Stname)

		value, ok := currSubTableMap[subTableName]
		if !ok {
			row[fileRows.config.Timestamp] = 0
			currSubTableMap[subTableName] = &dataRows{
				rows:         []map[string]interface{}{row},
				config:       fileRows.config,
			}
		}else{
			row[fileRows.config.Timestamp] = int64(len(value.rows)) * avgInterval
			value.rows = append(value.rows, row)
		}

	}

	var maxRows, tableRows int = 0, 0
	for tableName := range currSubTableMap{
		tableRows = len(currSubTableMap[tableName].rows)
		subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
		if tableRows > maxRows {
			maxRows = tableRows
		}
	}

	return int64(maxRows)
}


func getSubTableName(subTableValue string, superTableName string)  string {
	return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
}


func insertData(threadIndex, start, end int, wg  *sync.WaitGroup) {
	connection := getConnection()
	defer connection.Close()
	defer wg.Done()

	connection.Exec("use " + db) // use db

	num := 0
	for {
		log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)

		threadStartTime := time.Now()

		var successRows int64
		
		var rows []tableRows

		subTables := scaleTableNames[start:end]
		for _, tableName := range subTables {

			subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
			subTableRows := int64(len(subTableInfo.rows))
			superTableConf := superTableConfigMap[subTableInfo.config.Stname]

			tableStartTime := superTableConf.startTime
			var tableEndTime int64
			if vnum == 0 {
				// need continue generate data
				tableEndTime = time.Now().UnixNano()/1e6
			}else {
				tableEndTime = tableStartTime + superTableConf.cycleTime * int64(vnum) - superTableConf.avgInterval
			}

			insertRows := scaleTableMap[tableName].insertRows

			for {
				loopNum := insertRows / subTableRows
				rowIndex := insertRows % subTableRows
				currentRow := subTableInfo.rows[rowIndex]

				currentTime := getPrimaryKey(currentRow[subTableInfo.config.Timestamp]) + loopNum * superTableConf.cycleTime + tableStartTime
				if currentTime <= tableEndTime {
					// append
					row := buildRow(tableName, currentTime, subTableInfo, currentRow)
					rows = append(rows, row)

					insertRows++
					if  len(rows) == batch {
						// executebatch
						insertSql := buildSql(rows)
						affectedRows := executeBatchInsert(insertSql, connection)
						successRows = atomic.AddInt64(&successRows, affectedRows)

						rows = []tableRows{}
					}
				}else {
					// finished insert current table
					break
				}
			}

			scaleTableMap[tableName].insertRows = insertRows

		}
		
		left := len(rows)
		if  left > 0 {
			// executebatch
			insertSql := buildSql(rows)
			affectedRows := executeBatchInsert(insertSql, connection)
			successRows = atomic.AddInt64(&successRows, affectedRows)
		}

		atomic.AddInt64(&totalSuccessRows, successRows)
		log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, successRows, time.Since(threadStartTime)/1e6)

		if vnum != 0 {
			// thread finished insert data
			break
		}

		if(num == 0){
			wg.Done() // finished insert history data
		}
		num++

		// need continue insert data
		// log.Printf("thread-%d start to sleep %d ms.", threadIndex, delay)
		time.Sleep(time.Duration(delay) * time.Millisecond)

	}

}

func buildSql(rows []tableRows) string{

	var lastTableName string

	buffers := bytes.Buffer{}

	for i, row := range rows {
		if i == 0 {
			lastTableName = row.tableName
			buffers.WriteString(INSERT_PREFIX)
			buffers.WriteString(row.tableName)
			buffers.WriteString(" values")
			buffers.WriteString(row.value)
			continue
		}

		if lastTableName == row.tableName {
			buffers.WriteString(row.value)
		}else {
			buffers.WriteString(" ")
			buffers.WriteString(row.tableName)
			buffers.WriteString(" values")
			buffers.WriteString(row.value)
			lastTableName = row.tableName
		}
	}

	inserSql := buffers.String()
	return inserSql
}

func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows{

	tableRows := tableRows{tableName: tableName}

	buffers := bytes.Buffer{}

	buffers.WriteString("(")
	buffers.WriteString(fmt.Sprintf("%v", currentTime))
	buffers.WriteString(",")

	for _,field := range subTableInfo.config.Fields {
		buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
		buffers.WriteString(",")
	}

	buffers.Truncate(buffers.Len()-1)
	buffers.WriteString(")")

	insertSql := buffers.String()
	tableRows.value = insertSql

	return tableRows
}

func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
	result, error := connection.Exec(insertSql)
	if error != nil {
		log.Printf("execute insertSql %s error, %s\n", insertSql, error)
		return 0
	}
	affected, _ := result.RowsAffected()
	if affected < 0 {
		affected = 0
	}
	return affected
}

func getFieldValue(fieldValue interface{}) string {
	return fmt.Sprintf("'%v'", fieldValue)
}

func getConnection() *sql.DB{
	db, err := sql.Open(DRIVER_NAME, dataSourceName)
	if err != nil {
		panic(err)
	}
	return db
}


func getSubTableNameValue(suffix interface{}) string {
	return fmt.Sprintf("%v", suffix)
}

func hash(s string) int {
	v := int(crc32.ChecksumIEEE([]byte(s)))
	if v < 0 {
		return -v
	}
	return v
}

func readFile(config dataimport.CaseConfig) dataRows {
	fileFormat := strings.ToLower(config.Format)
	if fileFormat == JSON_FORMAT {
		return readJSONFile(config)
	} else if fileFormat == CSV_FORMAT {
		return readCSVFile(config)
	}

	log.Printf("the file %s is not supported yet\n", config.FilePath)
	return dataRows{}
}

func readCSVFile(config dataimport.CaseConfig) dataRows {
	var rows dataRows
	f, err := os.Open(config.FilePath)
	if err != nil {
		log.Printf("Error: %s, %s\n", config.FilePath, err)
		return rows
	}
	defer f.Close()

	r := bufio.NewReader(f)

	//read the first line as title
	lineBytes, _, err := r.ReadLine()
	if err == io.EOF {
		log.Printf("the file %s is empty\n", config.FilePath)
		return rows
	}
	line := strings.ToLower(string(lineBytes))
	titles := strings.Split(line, config.Separator)
	if len(titles) < 3 {
		// need suffix、 primarykey and at least one other field
		log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
		return rows
	}

	rows.config = config

	var lineNum = 0
	for {
		// read data row
		lineBytes, _, err = r.ReadLine()
		lineNum++
		if err == io.EOF {
			break
		}
		// fmt.Println(line)
		rowData := strings.Split(string(lineBytes), config.Separator)

		dataMap := make(map[string]interface{})
		for i, title := range titles {
			title = strings.TrimSpace(title)
			if i < len(rowData) {
				dataMap[title] = strings.TrimSpace(rowData[i])
			} else {
				dataMap[title] = ""
			}
		}

		// if the suffix valid
		if !existMapKeyAndNotEmpty(config.Timestamp, dataMap) {
			log.Printf("the Timestamp[%s] of line %d is empty, will filtered.\n", config.Timestamp, lineNum)
			continue
		}

		// if the primary key valid
		primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
		if primaryKeyValue == -1 {
			log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
			continue
		}

		dataMap[config.Timestamp] = primaryKeyValue

		rows.rows = append(rows.rows, dataMap)
	}
	return rows
}

func readJSONFile(config dataimport.CaseConfig) dataRows {

	var rows dataRows
	f, err := os.Open(config.FilePath)
	if err != nil {
		log.Printf("Error: %s, %s\n", config.FilePath, err)
		return rows
	}
	defer f.Close()

	r := bufio.NewReader(f)
	//log.Printf("file size %d\n", r.Size())

	rows.config = config
	var lineNum = 0
	for {
		lineBytes, _, err := r.ReadLine()
		lineNum++
		if err == io.EOF {
			break
		}

		line := make(map[string]interface{})
		err = json.Unmarshal(lineBytes, &line)

		if err != nil {
			log.Printf("line [%d] of file %s parse error, reason:  %s\n", lineNum, config.FilePath, err)
			continue
		}

		// transfer the key to lowercase
		lowerMapKey(line)

		if !existMapKeyAndNotEmpty(config.SubTableName, line) {
			log.Printf("the SubTableName[%s] of line %d is empty, will filtered.\n", config.SubTableName, lineNum)
			continue
		}

		primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
		if primaryKeyValue == -1 {
			log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
			continue
		}

		line[config.Timestamp] = primaryKeyValue

		rows.rows = append(rows.rows, line)
	}

	return rows
}

/**
* get primary key as millisecond , otherwise return -1
 */
func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
	if !existMapKeyAndNotEmpty(key, line) {
		return -1
	}
	if DATETIME == valueType {
		// transfer the datetime to milliseconds
		return parseMillisecond(line[key], valueFormat)
	}

	value, err := strconv.ParseInt(fmt.Sprintf("%v", line[key]), 10, 64)
	// as millisecond num
	if err != nil {
		return -1
	}
	return value
}

// parseMillisecond parse the dateStr to millisecond, return -1 if failed
func parseMillisecond(str interface{}, layout string) int64 {
	value, ok := str.(string)
	if !ok {
		return -1
	}

	t, err := time.ParseInLocation(layout, strings.TrimSpace(value), time.Local)

	if err != nil {
		log.Println(err)
		return -1
	}
	return  t.UnixNano()/1e6
}

// lowerMapKey transfer all the map key to lowercase
func lowerMapKey(maps map[string]interface{}) {
	for key := range maps {
		value := maps[key]
		delete(maps, key)
		maps[strings.ToLower(key)] = value
	}
}

func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool {
	value, ok := maps[key]
	if !ok {
		return false
	}

	str, err := value.(string)
	if err && len(str) == 0 {
		return false
	}
	return true
}

func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {

	if len(caseConfig.Stname) == 0 {
		log.Fatalf("the stname of case %s can't be empty\n", caseName)
	}

	caseConfig.Stname = strings.ToLower(caseConfig.Stname)

	if len(caseConfig.Tags) == 0 {
		log.Fatalf("the tags of case %s can't be empty\n", caseName)
	}

	if len(caseConfig.Fields) == 0 {
		log.Fatalf("the fields of case %s can't be empty\n", caseName)
	}

	if len(caseConfig.SubTableName) == 0 {
		log.Fatalf("the suffix of case %s can't be empty\n", caseName)
	}

	caseConfig.SubTableName = strings.ToLower(caseConfig.SubTableName)

	caseConfig.Timestamp = strings.ToLower(caseConfig.Timestamp)

	var timestampExist = false
	for i, field := range caseConfig.Fields {
		if strings.EqualFold(field.Name, caseConfig.Timestamp) {
			if strings.ToLower(field.Type) != TIMESTAMP {
				log.Fatalf("case %s's primaryKey %s field type is %s, it must be timestamp\n", caseName, caseConfig.Timestamp, field.Type)
			}
			timestampExist = true
			if i < len(caseConfig.Fields)-1 {
				// delete middle item,  a = a[:i+copy(a[i:], a[i+1:])]
				caseConfig.Fields = caseConfig.Fields[:i+copy(caseConfig.Fields[i:], caseConfig.Fields[i+1:])]
			}else {
				// delete the last item
				caseConfig.Fields = caseConfig.Fields[:len(caseConfig.Fields)-1]
			}
			break
		}
	}

	if !timestampExist {
		log.Fatalf("case %s primaryKey %s is not exist in fields\n", caseName, caseConfig.Timestamp)
	}

	caseConfig.TimestampType = strings.ToLower(caseConfig.TimestampType)
	if caseConfig.TimestampType != MILLISECOND && caseConfig.TimestampType != DATETIME {
		log.Fatalf("case %s's timestampType %s error, only can be timestamp or datetime\n", caseName, caseConfig.TimestampType)
	}

	if caseConfig.TimestampType == DATETIME && len(caseConfig.TimestampTypeFormat) == 0 {
		log.Fatalf("case %s's timestampTypeFormat %s can't be empty when timestampType is datetime\n", caseName, caseConfig.TimestampTypeFormat)
	}

}

func parseArg() {
	flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.")
	flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
	flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
	flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
	flag.Int64Var(&delay, "delay", 3 * 1000, "the delay millisecond to continue generate data when vnum set 0.")
	flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
	flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
	flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
	flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.")
	flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
	flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.")
	flag.IntVar(&port, "port", 6030, "tdengine server port.")
	flag.StringVar(&user, "user", "root", "user name to login into the database.")
	flag.StringVar(&password, "password", "taosdata", "the import tdengine user password")
	flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.")
	flag.StringVar(&db, "db", "", "name of the database to store data.")
	flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")

	flag.Parse()
}

func printArg()  {
	fmt.Println("used param: ")
	fmt.Println("-cfg: ", cfg)
	fmt.Println("-cases:", cases)
	fmt.Println("-hnum:", hnum)
	fmt.Println("-vnum:", vnum)
	fmt.Println("-thread:", thread)
	fmt.Println("-batch:", batch)
	fmt.Println("-auto:", auto)
	fmt.Println("-start:", starttimestr)
	fmt.Println("-interval:", interval)
	fmt.Println("-delay:", delay)
	fmt.Println("-host:", host)
	fmt.Println("-port", port)
	fmt.Println("-user", user)
	fmt.Println("-password", password)
	fmt.Println("-dropdb", dropdb)
	fmt.Println("-db", db)
	fmt.Println("-dbparam", dbparam)
}