Commit bcc1b406 authored by: S slguan

fix issue #313

Parent 744d31cb
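This change replaces the telegraf handler's old alias-based schema config (STgTag/STgField/STgStable kept in a string hash) with a simpler mapping: each entry in taos.telegraf.cfg now pairs a metric name and a set of field names with a target table name (STgSchema), and a built-in default schema (DEFAULT_TELEGRAF_CFG) is parsed when the file is missing or invalid. A minimal taos.telegraf.cfg in the new format (a sketch; the two entries mirror part of the built-in default):

{
  "metrics": [
    {"name": "system", "tbname": "system_uptime", "fields": ["uptime"]},
    {"name": "cpu", "tbname": "cpu_time", "fields": ["time_active", "time_guest"]}
  ]
}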
@@ -19,6 +19,43 @@
#include "tgJson.h"
#include "tsdb.h"
/*
* taos.telegraf.cfg formats like
{
"metrics": [
{
"name" : "system",
"tbname" : "system_uptime",
"fields": [
"uptime"
]
},
{
"name": "system",
"tbname" : "system_uptime_format",
"fields": [
"uptime_format"
]
},
{
"name": "swap",
"tbname" : "swap_in",
"fields": [
"in"
]
},
{
"name": "cpu",
"tbname" : "cpu_time",
"fields": [
"time_active",
"time_guest"
]
}
]
}
*/
#define TG_MAX_SORT_TAG_SIZE 20

static HttpDecodeMethod tgDecodeMethod = {"telegraf", tgProcessRquest};
@@ -26,80 +63,72 @@ static HttpEncodeMethod tgQueryMethod = {tgStartQueryJson, tgStopQueryJs
    tgBuildSqlAffectRowsJson, tgInitQueryJson, tgCleanQueryJson,
    tgCheckFinished, tgSetNextCmd};
-typedef struct {
-  char *tagName;
-  char *tagAlias;
-  char *tagType;
-} STgTag;
-
-typedef struct {
-  char *fieldName;
-  char *fieldAlias;
-  char *fieldType;
-} STgField;
-
-typedef struct {
-  char *    stName;
-  char *    stAlias;
-  STgTag *  tags;
-  STgField *fields;
-  int16_t   tagNum;
-  int16_t   fieldNum;
-  char *    createSTableStr;
-} STgStable;
-
-/*
- * hash of STgStable
- */
-static void *tgSchemaHash = NULL;
-
-/*
- * formats like
- * behind the midline is an alias of field/tag/stable
-  {
-    "metrics": [{
-      "name": "win_cpu-cpu",
-      "fields": {
-        "Percent_DPC_Time": "float",
-        "Percent_Idle_Time": "float",
-        "Percent_Interrupt_Time": "float",
-        "Percent_Privileged_Time": "float",
-        "Percent_Processor_Time": "float",
-        "Percent_User_Time": "float"
-      },
-      "tags": {
-        "host": "binary(32)",
-        "instance": "binary(32)",
-        "objectname": "binary(32)"
-      }
-    },
-    {
-      "fields": {
-        "Bytes_Received_persec-f1": "float",
-        "Bytes_Sent_persec-f2": "float",
-        "Packets_Outbound_Discarded-f3": "float",
-        "Packets_Outbound_Errors-f4": "float",
-        "Packets_Received_Discarded-f5": "float",
-        "Packets_Received_Errors": "float",
-        "Packets_Received_persec": "float",
-        "Packets_Sent_persec": "float"
-      },
-      "name": "win_net",
-      "tags": {
-        "host": "binary(32)",
-        "instance": "binary(32)",
-        "objectname": "binary(32)"
-      },
-      "timestamp": 1536219762000
-    }]
-  }
- */
-void tgReadSchemaMetric(cJSON *metric) {
-  STgStable stable = {0};
-  int       createSTableStrLen = 100;
-  bool      parsedOk = true;
-
-  // stable name

+static const char DEFAULT_TELEGRAF_CFG[] =
+    "{\"metrics\":["
+    "{\"name\":\"system\",\"tbname\":\"system_uptime\",\"fields\":[\"uptime\"]},"
+    "{\"name\":\"system\",\"tbname\":\"system_uptime_format\",\"fields\":[\"uptime_format\"]},"
+    "{\"name\":\"swap\",\"tbname\":\"swap_in\",\"fields\":[\"in\"]},"
+    "{\"name\":\"cpu\",\"tbname\":\"cpu_time\",\"fields\":[\"time_active\",\"time_guest\"]}"
+    "]}";
+
+typedef struct {
+  char * name;
+  char * tbName;
+  char **fields;
+  int    fieldNum;
+} STgSchema;
+
+typedef struct {
+  STgSchema *schemas;
+  int        size;
+  int        pos;
+} STgSchemas;
+
+static STgSchemas tgSchemas = {0};
+
+void tgFreeSchema(STgSchema *schema) {
+  if (schema->name != NULL) {
+    free(schema->name);
+    schema->name = NULL;
+  }
+  if (schema->tbName != NULL) {
+    free(schema->tbName);
+    schema->tbName = NULL;
+  }
+  if (schema->fields != NULL) {
+    for (int f = 0; f < schema->fieldNum; ++f) {
+      if (schema->fields[f] != NULL) {
+        free(schema->fields[f]);
+        schema->fields[f] = NULL;
+      }
+    }
+    free(schema->fields);
+    schema->fields = NULL;
+    schema->fieldNum = 0;
+  }
+}
+
+void tgFreeSchemas() {
+  if (tgSchemas.schemas != NULL) {
+    for (int s = 0; s < tgSchemas.size; ++s) {
+      tgFreeSchema(&tgSchemas.schemas[s]);
+    }
+    free(tgSchemas.schemas);
+    tgSchemas.size = 0;
+  }
+}
+
+void tgInitSchemas(int size) {
+  tgFreeSchemas();
+  tgSchemas.schemas = calloc(sizeof(STgSchema), size);
+  tgSchemas.size = 0;
+}
+
+void tgParseSchemaMetric(cJSON *metric) {
+  STgSchema schema = {0};
+  bool      parsedOk = true;
+
+  // name
  cJSON *name = cJSON_GetObjectItem(metric, "name");
  if (name == NULL) {
    parsedOk = false;
@@ -118,331 +147,143 @@ void tgReadSchemaMetric(cJSON *metric) {
    parsedOk = false;
    goto ParseEnd;
  }
+  schema.name = calloc(nameLen + 1, 1);
+  strcpy(schema.name, name->valuestring);
+
+  // tbname
+  cJSON *tbname = cJSON_GetObjectItem(metric, "tbname");
+  if (tbname == NULL) {
+    parsedOk = false;
+    goto ParseEnd;
+  }
+  if (tbname->type != cJSON_String) {
+    parsedOk = false;
+    goto ParseEnd;
+  }
+  if (tbname->valuestring == NULL) {
+    parsedOk = false;
+    goto ParseEnd;
+  }
+  int tbnameLen = (int)strlen(tbname->valuestring);
+  if (tbnameLen == 0) {
+    parsedOk = false;
+    goto ParseEnd;
+  }
+  schema.tbName = calloc(tbnameLen + 1, 1);
+  strcpy(schema.tbName, tbname->valuestring);

-  int aliasPos = -1;
-  for (int i = 0; i < nameLen - 1; ++i) {
-    if (name->valuestring[i] == '-') {
-      aliasPos = i;
-      break;
-    }
-  }
-
-  if (aliasPos == -1) {
-    stable.stName = stable.stAlias = calloc((size_t)nameLen + 1, 1);
-    strcpy(stable.stName, name->valuestring);
-    createSTableStrLen += nameLen;
-  } else {
-    stable.stName = calloc((size_t)aliasPos + 1, 1);
-    stable.stAlias = calloc((size_t)(nameLen - aliasPos), 1);
-    strncpy(stable.stName, name->valuestring, (size_t)aliasPos);
-    strncpy(stable.stAlias, name->valuestring + aliasPos + 1, (size_t)(nameLen - aliasPos - 1));
-    createSTableStrLen += (nameLen - aliasPos);
-  }
-
-  // tags
-  cJSON *tags = cJSON_GetObjectItem(metric, "tags");
-  if (tags == NULL) {
-    parsedOk = false;
-    goto ParseEnd;
-  }
-
-  int tagsSize = cJSON_GetArraySize(tags);
-  if (tagsSize <= 0 || tagsSize > TSDB_MAX_TAGS) {
-    parsedOk = false;
-    goto ParseEnd;
-  }
stable.tags = calloc(sizeof(STgTag), (size_t)tagsSize);
stable.tagNum = (int16_t)tagsSize;
for (int i = 0; i < tagsSize; i++) {
STgTag *tagSchema = &stable.tags[i];
cJSON * tag = cJSON_GetArrayItem(tags, i);
if (tag == NULL) {
parsedOk = false;
goto ParseEnd;
}
if (tag->string == NULL) {
parsedOk = false;
goto ParseEnd;
}
int nameLen = (int)strlen(tag->string);
if (nameLen == 0 || nameLen > TSDB_METER_NAME_LEN) {
parsedOk = false;
goto ParseEnd;
}
int aliasPos = -1;
for (int i = 0; i < nameLen - 1; ++i) {
if (tag->string[i] == '-') {
aliasPos = i;
break;
}
}
if (aliasPos == -1) {
tagSchema->tagName = calloc((size_t)nameLen + 1, 1);
strcpy(tagSchema->tagName, tag->string);
tagSchema->tagAlias = calloc((size_t)nameLen + 3, 1);
strcpy(tagSchema->tagAlias, "t_");
strcpy(tagSchema->tagAlias + 2, tag->string);
createSTableStrLen += (nameLen + 4);
} else {
tagSchema->tagName = calloc((size_t)aliasPos + 1, 1);
tagSchema->tagAlias = calloc((size_t)(nameLen - aliasPos), 1);
strncpy(tagSchema->tagName, tag->string, (size_t)aliasPos);
strncpy(tagSchema->tagAlias, tag->string + aliasPos + 1, (size_t)(nameLen - aliasPos - 1));
createSTableStrLen += (nameLen - aliasPos + 2);
}
-    if (tag->type == cJSON_String) {
-      if (tag->valuestring == NULL) {
parsedOk = false;
goto ParseEnd;
}
int valueLen = (int)strlen(tag->valuestring);
if (valueLen == 0) {
parsedOk = false;
goto ParseEnd;
}
if (strcasecmp(tag->valuestring, "timestamp") == 0 || strcasecmp(tag->valuestring, "bool") == 0 ||
strcasecmp(tag->valuestring, "tinyint") == 0 || strcasecmp(tag->valuestring, "smallint") == 0 ||
strcasecmp(tag->valuestring, "int") == 0 || strcasecmp(tag->valuestring, "bigint") == 0 ||
strcasecmp(tag->valuestring, "float") == 0 || strcasecmp(tag->valuestring, "double") == 0 ||
strncasecmp(tag->valuestring, "binary", 6) == 0 || strncasecmp(tag->valuestring, "nchar", 5) == 0) {
tagSchema->tagType = calloc((size_t)valueLen + 1, 1);
strcpy(tagSchema->tagType, tag->valuestring);
createSTableStrLen += valueLen;
} else {
tagSchema->tagType = calloc(11, 1);
strcpy(tagSchema->tagType, "binary(32)");
createSTableStrLen += 12;
}
} else if (tag->type == cJSON_False || tag->type == cJSON_True) {
tagSchema->tagType = calloc(8, 1);
strcpy(tagSchema->tagType, "tinyint");
createSTableStrLen += 10;
} else {
tagSchema->tagType = calloc(7, 1);
strcpy(tagSchema->tagType, "bigint");
createSTableStrLen += 9;
}
}
  // fields
  cJSON *fields = cJSON_GetObjectItem(metric, "fields");
  if (fields == NULL) {
    parsedOk = false;
    goto ParseEnd;
  }

  int fieldSize = cJSON_GetArraySize(fields);
  if (fieldSize <= 0 || fieldSize > TSDB_MAX_COLUMNS) {
    parsedOk = false;
    goto ParseEnd;
  }
stable.fields = calloc(sizeof(STgField), (size_t)fieldSize);
stable.fieldNum = (int16_t)fieldSize;
for (int i = 0; i < fieldSize; i++) {
STgField *fieldSchema = &stable.fields[i];
cJSON * field = cJSON_GetArrayItem(fields, i);
if (field == NULL) {
parsedOk = false;
goto ParseEnd;
}
if (field->string == NULL) {
parsedOk = false;
goto ParseEnd;
}
int nameLen = (int)strlen(field->string);
if (nameLen == 0 || nameLen > TSDB_METER_NAME_LEN) {
parsedOk = false;
goto ParseEnd;
}
int aliasPos = -1;
for (int i = 0; i < nameLen - 1; ++i) {
if (field->string[i] == '-') {
aliasPos = i;
break;
}
}
if (aliasPos == -1) {
fieldSchema->fieldName = calloc((size_t)nameLen + 1, 1);
strcpy(fieldSchema->fieldName, field->string);
fieldSchema->fieldAlias = calloc((size_t)nameLen + 3, 1);
strcpy(fieldSchema->fieldAlias, "f_");
strcpy(fieldSchema->fieldAlias + 2, field->string);
createSTableStrLen += (nameLen + 4);
} else {
fieldSchema->fieldName = calloc((size_t)aliasPos + 1, 1);
fieldSchema->fieldAlias = calloc((size_t)(nameLen - aliasPos), 1);
strncpy(fieldSchema->fieldName, field->string, (size_t)aliasPos);
strncpy(fieldSchema->fieldAlias, field->string + aliasPos + 1, (size_t)(nameLen - aliasPos - 1));
createSTableStrLen += (nameLen - aliasPos + 2);
}
-      if (field->type == cJSON_String) {
-        if (field->valuestring == NULL) {
-          parsedOk = false;
-          goto ParseEnd;
-        }
-        int valueLen = (int)strlen(field->valuestring);
-        if (valueLen == 0) {
-          parsedOk = false;
-          goto ParseEnd;
-        }
-        if (strcasecmp(field->valuestring, "timestamp") == 0 || strcasecmp(field->valuestring, "bool") == 0 ||
-            strcasecmp(field->valuestring, "tinyint") == 0 || strcasecmp(field->valuestring, "smallint") == 0 ||
-            strcasecmp(field->valuestring, "int") == 0 || strcasecmp(field->valuestring, "bigint") == 0 ||
-            strcasecmp(field->valuestring, "float") == 0 || strcasecmp(field->valuestring, "double") == 0 ||
-            strncasecmp(field->valuestring, "binary", 6) == 0 || strncasecmp(field->valuestring, "nchar", 5) == 0) {
-          fieldSchema->fieldType = calloc((size_t)valueLen + 1, 1);
-          strcpy(fieldSchema->fieldType, field->valuestring);
-          createSTableStrLen += valueLen;
-        } else {
-          fieldSchema->fieldType = calloc(11, 1);
-          strcpy(fieldSchema->fieldType, "binary(32)");
-          createSTableStrLen += 12;
-        }
-      } else if (field->type == cJSON_False || field->type == cJSON_True) {
-        fieldSchema->fieldType = calloc(8, 1);
-        strcpy(fieldSchema->fieldType, "tinyint");
-        createSTableStrLen += 10;
-      } else {
-        fieldSchema->fieldType = calloc(7, 1);
-        strcpy(fieldSchema->fieldType, "double");
-        createSTableStrLen += 9;
-      }
-    }

+  if (fieldSize > 0) {
+    schema.fields = calloc(sizeof(STgSchema), (size_t)fieldSize);
+    schema.fieldNum = fieldSize;
+    for (int i = 0; i < fieldSize; i++) {
+      cJSON *field = cJSON_GetArrayItem(fields, i);
+      if (field == NULL) {
+        parsedOk = false;
+        goto ParseEnd;
+      }
+      if (field->valuestring == NULL) {
+        parsedOk = false;
+        goto ParseEnd;
+      }
+      int nameLen = (int)strlen(field->valuestring);
+      if (nameLen == 0 || nameLen > TSDB_METER_NAME_LEN) {
+        parsedOk = false;
+        goto ParseEnd;
+      }
+      schema.fields[i] = calloc(nameLen + 1, 1);
+      strcpy(schema.fields[i], field->valuestring);
+    }
+  }
// assembling create stable sql
stable.createSTableStr = calloc((size_t)createSTableStrLen, 1);
strcpy(stable.createSTableStr, "create table if not exists %s.%s(ts timestamp");
int len = (int)strlen(stable.createSTableStr);
for (int i = 0; i < stable.fieldNum; ++i) {
STgField *field = &stable.fields[i];
len += sprintf(stable.createSTableStr + len, ",%s %s", field->fieldAlias, field->fieldType);
}
len += sprintf(stable.createSTableStr + len, ") tags(");
for (int i = 0; i < stable.tagNum; ++i) {
STgTag *tag = &stable.tags[i];
if (i == 0) {
len += sprintf(stable.createSTableStr + len, "%s %s", tag->tagAlias, tag->tagType);
} else {
len += sprintf(stable.createSTableStr + len, ",%s %s", tag->tagAlias, tag->tagType);
}
}
sprintf(stable.createSTableStr + len, ")");
ParseEnd:
  if (parsedOk) {
-    taosAddStrHash(tgSchemaHash, stable.stName, (char *)(&stable));
+    tgSchemas.schemas[tgSchemas.size++] = schema;
  } else {
+    tgFreeSchema(&schema);
-    if (stable.stName != NULL) {
free(stable.stName);
}
if (stable.stAlias != NULL) {
free(stable.stName);
}
for (int i = 0; i < stable.tagNum; ++i) {
if (stable.tags[i].tagName != NULL) {
free(stable.tags[i].tagName);
}
if (stable.tags[i].tagAlias != NULL) {
free(stable.tags[i].tagAlias);
}
if (stable.tags[i].tagType != NULL) {
free(stable.tags[i].tagType);
}
}
if (stable.tags != NULL) {
free(stable.tags);
}
for (int i = 0; i < stable.fieldNum; ++i) {
if (stable.fields[i].fieldName != NULL) {
free(stable.fields[i].fieldName);
}
if (stable.fields[i].fieldAlias != NULL) {
free(stable.fields[i].fieldAlias);
}
if (stable.fields[i].fieldType != NULL) {
free(stable.fields[i].fieldType);
}
}
if (stable.fields != NULL) {
free(stable.fields);
}
if (stable.createSTableStr != NULL) {
free(stable.createSTableStr);
}
  }
}
+int tgParseSchema(const char *content, char*fileName) {
+  cJSON *root = cJSON_Parse(content);
+  if (root == NULL) {
+    httpError("failed to parse telegraf schema file:%s, invalid json format, content:%s", fileName, content);
+    return -1;
+  }
+
+  int size = 0;
+  cJSON *metrics = cJSON_GetObjectItem(root, "metrics");
+  if (metrics != NULL) {
+    size = cJSON_GetArraySize(metrics);
+    if (size <= 0) {
+      httpError("failed to parse telegraf schema file:%s, metrics size is 0", fileName);
+      cJSON_Delete(root);
+      return -1;
+    }
+
+    tgInitSchemas(size);
+    for (int i = 0; i < size; i++) {
+      cJSON *metric = cJSON_GetArrayItem(metrics, i);
+      if (metric != NULL) {
+        tgParseSchemaMetric(metric);
+      }
+    }
+  } else {
+    size = 1;
+    tgInitSchemas(size);
+    tgParseSchemaMetric(root);
+  }
+
+  cJSON_Delete(root);
+  return size;
+}
+
+int tgReadSchema(const char *fileName) {
+  FILE *fp = fopen(fileName, "r");
+  if (fp == NULL) {
+    return -1;
+  }
+
+  httpPrint("open telegraf schema file:%s success", fileName);
+  fseek(fp, 0, SEEK_END);
+  size_t contentSize = (size_t)ftell(fp);
+  rewind(fp);
+  char *content = (char *)calloc(contentSize * sizeof(char) + 1, 1);
+  size_t result = fread(content, 1, contentSize, fp);
+  if (result != contentSize) {
+    httpError("failed to read telegraf schema file:%s", fileName);
+    return -1;
+  }
+
+  int schemaNum = tgParseSchema(content, fileName);
+
+  free(content);
+  fclose(fp);
+
+  httpPrint("parse telegraf schema file:%s, schema size:%d", fileName, schemaNum);
+  return schemaNum;
+}

-int tgReadSchema(const char *fileName) {
-  FILE *fp = fopen(fileName, "r");
-  if (fp == NULL) {
-    //httpTrace("failed to open telegraf schema config file:%s, use default schema", fileName);
-    return -1;
-  }
-
-  httpPrint("open telegraf schema config file:%s successfully", fileName);
-  fseek(fp, 0, SEEK_END);
-  size_t contentSize = (size_t)ftell(fp);
-  rewind(fp);
-  char * content = (char *)calloc(contentSize * sizeof(char) + 1, 1);
-  size_t result = fread(content, 1, contentSize, fp);
-  if (result != contentSize) {
-    httpError("failed to read telegraf schema config file:%s, use default schema", fileName);
-    return -1;
-  }
-
-  cJSON *root = cJSON_Parse(content);
-  if (root == NULL) {
-    httpError("failed to parse telegraf schema config file:%s, invalid json format", fileName);
-    return -1;
-  }
-
-  cJSON *metrics = cJSON_GetObjectItem(root, "metrics");
-  if (metrics != NULL) {
-    int size = cJSON_GetArraySize(metrics);
-    if (size <= 0) {
-      httpError("failed to parse telegraf schema config file:%s, metrics size is 0", fileName);
-      cJSON_Delete(root);
-      return -1;
-    }
-
-    for (int i = 0; i < size; i++) {
-      cJSON *metric = cJSON_GetArrayItem(metrics, i);
-      if (metric != NULL) {
-        tgReadSchemaMetric(metric);
-      }
-    }
-  } else {
-    tgReadSchemaMetric(root);
-  }
-
-  cJSON_Delete(root);
-  free(content);
-  fclose(fp);
-
-  httpPrint("parse telegraf schema config file:%s successfully, stable schema size:%d", fileName);
-  return 0;
-}
/*
* in case of file not exist
* we use default schema:
* such as:
* diskio
* mem
* processes
* procstat
* system
* disk
* swap
* kernel
*/
void tgInitHandle(HttpServer *pServer) {
-  tgSchemaHash = taosInitStrHash(100, sizeof(STgStable), taosHashStringStep1);
  char fileName[256] = {0};
  sprintf(fileName, "%s/taos.telegraf.cfg", configDir);
-  if (tgReadSchema(fileName) == -1) {
-    taosCleanUpStrHash(tgSchemaHash);
-    tgSchemaHash = NULL;
+  if (tgReadSchema(fileName) <= 0) {
+    tgFreeSchemas();
+    if (tgParseSchema(DEFAULT_TELEGRAF_CFG, "default") <= 0) {
+      tgFreeSchemas();
+    }
  }

  httpAddMethod(pServer, &tgDecodeMethod);
}
@@ -481,23 +322,57 @@ char *tgGetDbFromUrl(HttpContext *pContext) {
  return pParser->path[TG_DB_URL_POS].pos;
}
char *tgGetStableName(char *stname, cJSON *fields, int fieldsSize) {
for (int s = 0; s < tgSchemas.size; ++s) {
STgSchema *schema = &tgSchemas.schemas[s];
if (strcasecmp(schema->name, stname) != 0) {
continue;
}
bool schemaMatched = true;
for (int f = 0; f < schema->fieldNum; ++f) {
char *fieldName = schema->fields[f];
bool fieldMatched = false;
for (int i = 0; i < fieldsSize; i++) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (strcasecmp(field->string, fieldName) == 0) {
fieldMatched = true;
break;
}
}
if (!fieldMatched) {
schemaMatched = false;
break;
}
}
if (schemaMatched) {
return schema->tbName;
}
}
return stname;
}
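To illustrate the lookup above (example payload, not taken from the commit): under the built-in default schema, a metric named "system" whose field set contains "uptime" is routed to the super table system_uptime, because every field listed for that entry is present in the request; if no configured entry matches, the metric's own name is used, as before. The field values below are illustrative.

{
  "name": "system",
  "tags": {"host": "raynor"},
  "fields": {"uptime": 86400},
  "timestamp": 1458229140
}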
/*
 * parse single metric
 {
   "fields": {
     "field_1": 30,
     "field_2": 4,
     "field_N": 59,
     "n_images": 660
   },
   "name": "docker",
   "tags": {
     "host": "raynor"
   },
   "timestamp": 1458229140
 }
 */
-bool tgProcessSingleMetricUseDefaultSchema(HttpContext *pContext, cJSON *metric, char *db) {
+bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
  // metric name
  cJSON *name = cJSON_GetObjectItem(metric, "name");
  if (name == NULL) {
@@ -698,7 +573,7 @@ bool tgProcessSingleMetricUseDefaultSchema(HttpContext *pContext, cJSON *metric,
  table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint);

  // stable name
-  char *stname = name->valuestring;
+  char *stname = tgGetStableName(name->valuestring, fields, fieldsSize);
  table_cmd->metric = stable_cmd->metric = httpAddToSqlCmdBuffer(pContext, "%s", stname);
  if (tsTelegrafUseFieldNum == 0) {
    table_cmd->stable = stable_cmd->stable = httpAddToSqlCmdBuffer(pContext, "%s", stname);
@@ -851,267 +726,6 @@ bool tgProcessSingleMetricUseDefaultSchema(HttpContext *pContext, cJSON *metric,
  return true;
}
bool tgProcessSingleMetricUseConfigSchema(HttpContext *pContext, cJSON *metric, char *db) {
// metric name
cJSON *name = cJSON_GetObjectItem(metric, "name");
if (name == NULL) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_NULL);
return false;
}
if (name->type != cJSON_String) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_TYPE);
return false;
}
if (name->valuestring == NULL) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL);
return false;
}
int nameLen = (int)strlen(name->valuestring);
if (nameLen == 0) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL);
return false;
}
STgStable *stable = (STgStable *)taosGetStrHashData(tgSchemaHash, name->valuestring);
if (stable == NULL) {
httpSendErrorResp(pContext, HTTP_TG_STABLE_NOT_EXIST);
return false;
}
// timestamp
cJSON *timestamp = cJSON_GetObjectItem(metric, "timestamp");
if (timestamp == NULL) {
httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_NULL);
return false;
}
if (timestamp->type != cJSON_Number) {
httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_TYPE);
return false;
}
if (timestamp->valueint <= 0) {
httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_VAL_NULL);
return false;
}
// tags
cJSON *tags = cJSON_GetObjectItem(metric, "tags");
if (tags == NULL) {
httpSendErrorResp(pContext, HTTP_TG_TAGS_NULL);
return false;
}
int tagsSize = cJSON_GetArraySize(tags);
if (tagsSize <= 0) {
httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_0);
return false;
}
for (int i = 0; i < tagsSize; i++) {
cJSON *tag = cJSON_GetArrayItem(tags, i);
if (tag == NULL) {
httpSendErrorResp(pContext, HTTP_TG_TAG_NULL);
return false;
}
if (tag->string == NULL || strlen(tag->string) == 0) {
httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_NULL);
return false;
}
if (tag->type != cJSON_Number && tag->type != cJSON_String) {
httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_TYPE);
return false;
}
if (tag->type == cJSON_String) {
if (tag->valuestring == NULL || strlen(tag->valuestring) == 0) {
httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_NULL);
return false;
}
}
}
// fields
cJSON *fields = cJSON_GetObjectItem(metric, "fields");
if (fields == NULL) {
httpSendErrorResp(pContext, HTTP_TG_FIELDS_NULL);
return false;
}
int fieldsSize = cJSON_GetArraySize(fields);
if (fieldsSize <= 0) {
httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_0);
return false;
}
for (int i = 0; i < fieldsSize; i++) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (field == NULL) {
httpSendErrorResp(pContext, HTTP_TG_FIELD_NULL);
return false;
}
if (field->string == NULL || strlen(field->string) == 0) {
httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_NULL);
return false;
}
if (field->type != cJSON_Number && field->type != cJSON_String) {
httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_TYPE);
return false;
}
if (field->type == cJSON_String) {
if (field->valuestring == NULL || strlen(field->valuestring) == 0) {
httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_NULL);
return false;
}
}
}
// assembling cmds
HttpSqlCmd *stable_cmd = httpNewSqlCmd(pContext);
if (stable_cmd == NULL) {
httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
return false;
}
stable_cmd->cmdType = HTTP_CMD_TYPE_CREATE_STBALE;
stable_cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN;
HttpSqlCmd *table_cmd = httpNewSqlCmd(pContext);
if (table_cmd == NULL) {
httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
return false;
}
table_cmd->cmdType = HTTP_CMD_TYPE_INSERT;
table_cmd->tagNum = stable_cmd->tagNum = (int8_t)stable->tagNum;
table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint);
// stable name
char *stname = stable->stAlias;
table_cmd->metric = stable_cmd->metric = httpAddToSqlCmdBuffer(pContext, "%s", stname);
table_cmd->stable = stable_cmd->stable = httpAddToSqlCmdBuffer(pContext, "%s", stname);
table_cmd->stable = stable_cmd->stable =
httpShrinkTableName(pContext, table_cmd->stable, httpGetCmdsString(pContext, table_cmd->stable));
// stable tag for detail
for (int ts = 0; ts < stable->tagNum; ++ts) {
STgTag *tagSchema = &stable->tags[ts];
bool tagParsed = false;
for (int tt = 0; tt < tagsSize; ++tt) {
cJSON *tag = cJSON_GetArrayItem(tags, tt);
if (strcasecmp(tag->string, tagSchema->tagName) != 0) {
continue;
}
stable_cmd->tagNames[ts] = table_cmd->tagNames[ts] = httpAddToSqlCmdBuffer(pContext, tagSchema->tagAlias);
if (tag->type == cJSON_String) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] =
httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring);
tagParsed = true;
} else if (tag->type == cJSON_Number) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint);
tagParsed = true;
} else if (tag->type == cJSON_True) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "1");
tagParsed = true;
} else if (tag->type == cJSON_False) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "0");
tagParsed = true;
} else {
}
break;
}
if (!tagParsed) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "NULL");
}
}
// table name
table_cmd->table = stable_cmd->table = httpAddToSqlCmdBufferNoTerminal(pContext, "%s", stname);
for (int ts = 0; ts < stable->tagNum; ++ts) {
STgTag *tagSchema = &stable->tags[ts];
bool tagParsed = false;
for (int tt = 0; tt < tagsSize; ++tt) {
cJSON *tag = cJSON_GetArrayItem(tags, tt);
if (strcasecmp(tag->string, tagSchema->tagName) != 0) {
continue;
}
if (tag->type == cJSON_String) {
httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring);
tagParsed = true;
} else if (tag->type == cJSON_Number) {
httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint);
tagParsed = true;
} else if (tag->type == cJSON_True) {
httpAddToSqlCmdBufferNoTerminal(pContext, "_1");
tagParsed = true;
} else if (tag->type == cJSON_False) {
httpAddToSqlCmdBufferNoTerminal(pContext, "_0");
tagParsed = true;
} else {
}
break;
}
if (!tagParsed) {
stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBufferNoTerminal(pContext, "_n");
}
}
httpAddToSqlCmdBuffer(pContext, "");
table_cmd->table = stable_cmd->table =
httpShrinkTableName(pContext, table_cmd->table, httpGetCmdsString(pContext, table_cmd->table));
// assembling create stable sql
stable_cmd->sql =
httpAddToSqlCmdBuffer(pContext, stable->createSTableStr, db, httpGetCmdsString(pContext, table_cmd->stable));
// assembling insert sql
table_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "import into %s.%s using %s.%s tags(", db,
httpGetCmdsString(pContext, table_cmd->table), db,
httpGetCmdsString(pContext, table_cmd->stable));
for (int ts = 0; ts < stable->tagNum; ++ts) {
if (ts != 0) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",%s", httpGetCmdsString(pContext, stable_cmd->tagValues[ts]));
} else {
httpAddToSqlCmdBufferNoTerminal(pContext, "%s", httpGetCmdsString(pContext, stable_cmd->tagValues[ts]));
}
}
httpAddToSqlCmdBufferNoTerminal(pContext, ") values(%ld", timestamp->valueint);
// stable tag for detail
for (int fs = 0; fs < stable->fieldNum; ++fs) {
STgField *fieldSchema = &stable->fields[fs];
bool fieldParsed = false;
for (int ff = 0; ff < fieldsSize; ++ff) {
cJSON *field = cJSON_GetArrayItem(fields, ff);
if (strcasecmp(field->string, fieldSchema->fieldName) != 0) {
continue;
}
if (field->type == cJSON_String) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",\"%s\"", field->valuestring);
fieldParsed = true;
} else if (field->type == cJSON_Number) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",%lf", field->valuedouble);
fieldParsed = true;
} else if (field->type == cJSON_True) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",1");
fieldParsed = true;
} else if (field->type == cJSON_False) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",0");
fieldParsed = true;
} else {
}
break;
}
if (!fieldParsed) {
httpAddToSqlCmdBufferNoTerminal(pContext, ",NULL");
}
}
httpAddToSqlCmdBuffer(pContext, ")");
return true;
}
/**
 * request from telegraf 1.7.0
 * single request:
@@ -1213,16 +827,9 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
    for (int i = 0; i < size; i++) {
      cJSON *metric = cJSON_GetArrayItem(metrics, i);
      if (metric != NULL) {
-        if (tgSchemaHash != NULL) {
-          if (!tgProcessSingleMetricUseConfigSchema(pContext, metric, db)) {
-            cJSON_Delete(root);
-            return false;
-          }
-        } else {
-          if (!tgProcessSingleMetricUseDefaultSchema(pContext, metric, db)) {
-            cJSON_Delete(root);
-            return false;
-          }
-        }
+        if (!tgProcessSingleMetric(pContext, metric, db)) {
+          cJSON_Delete(root);
+          return false;
+        }
      }
    }
@@ -1245,16 +852,9 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
    cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN;
    cmd->sql = httpAddToSqlCmdBuffer(pContext, "create database if not exists %s", db);
-    if (tgSchemaHash != NULL) {
-      if (!tgProcessSingleMetricUseConfigSchema(pContext, root, db)) {
-        cJSON_Delete(root);
-        return false;
-      }
-    } else {
-      if (!tgProcessSingleMetricUseDefaultSchema(pContext, root, db)) {
-        cJSON_Delete(root);
-        return false;
-      }
-    }
+    if (!tgProcessSingleMetric(pContext, root, db)) {
+      cJSON_Delete(root);
+      return false;
+    }
  }
@@ -1268,9 +868,6 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
}

bool tgProcessRquest(struct HttpContext *pContext) {
tgGetUserFromUrl(pContext);
tgGetPassFromUrl(pContext);
  if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) {
    httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR);
    return false;
...