Commit 5ce81021 authored by 151250176

Merge branch 'master' of https://github.com/apache/incubator-iotdb into xuekaifeng-spark-connector

# Conflicts:
#	tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TsFileMetaData.java
#	tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
......@@ -96,8 +96,8 @@ then by default `IOTDB_HOME` is the direct parent directory of `sbin/start-serve
(or that of `sbin\start-server.bat` on Windows).
* if `IOTDB_CLI_HOME` is not explicitly assigned,
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-client.sh` on
Unix/OS X (or that of `sbin\start-client.bat` on Windows).
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-cli.sh` on
Unix/OS X (or that of `sbin\start-cli.bat` on Windows).
If this is not the first time you build IoTDB, remember to delete the following files:
......@@ -199,10 +199,10 @@ After build, the IoTDB client will be at the folder "client/target/iotdb-client-
```
# Unix/OS X
> $IOTDB_CLI_HOME/sbin/start-client.sh -h <IP> -p <PORT> -u <USER_NAME>
> $IOTDB_CLI_HOME/sbin/start-cli.sh -h <IP> -p <PORT> -u <USER_NAME>
# Windows
> $IOTDB_CLI_HOME\sbin\start-client.bat -h <IP> -p <PORT> -u <USER_NAME>
> $IOTDB_CLI_HOME\sbin\start-cli.bat -h <IP> -p <PORT> -u <USER_NAME>
```
> NOTE: IoTDB ships with a default user named 'root', and the default password for 'root' is 'root'. You can use this default user if this is your first try or if you have not created any users yourself.
......
......@@ -130,7 +130,7 @@ If you use the previous unofficial version 0.7.0. It is incompatible with 0.8.0.
* IOTDB-44 Error message in server log when select timeseries
* IOTDB-49 Authorizer module outputs too many debug log info
* IOTDB-50 DataSetWithoutTimeGenerator's initHeap behaves wrongly
* IOTDB-52 Client doesn't support aggregate
* IOTDB-52 Cli doesn't support aggregate
* IOTDB-54 Predicates doesn't take effect
* IOTDB-67 ValueDecoder reading new page bug
* IOTDB-70 Disconnect from server when logging in fails
......@@ -147,7 +147,7 @@ If you use the previous unofficial version 0.7.0. It is incompatible with 0.8.0.
* IOTDB-103 Does not give a hint when encountering unsupported data types
* IOTDB-104 MManager is incorrectly recovered when system reboots
* IOTDB-108 Mistakes in documents
* IOTDB-110 Clients inserts data normally even if there is no space left on the disk
* IOTDB-110 Clis inserts data normally even if there is no space left on the disk
* IOTDB-118 When the disk space is full, the storage group is created successfully
* IOTDB-121 A bug of query on value columns
* IOTDB-128 Probably a bug in iotdb official website
......
......@@ -29,7 +29,6 @@
</parent>
<artifactId>iotdb-client</artifactId>
<name>IoTDB Client</name>
<description>A Client tool.</description>
<properties>
<cli.test.skip>false</cli.test.skip>
<cli.it.skip>${cli.test.skip}</cli.it.skip>
......
......@@ -28,7 +28,7 @@ pushd %~dp0..
if NOT DEFINED IOTDB_CLI_HOME set IOTDB_CLI_HOME=%CD%
popd
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cli.client.Client
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.client.Client
if NOT DEFINED JAVA_HOME goto :err
@REM -----------------------------------------------------------------------------
......
......@@ -24,7 +24,7 @@ if [ -z "${IOTDB_CLI_HOME}" ]; then
fi
MAIN_CLASS=org.apache.iotdb.cli.client.Client
MAIN_CLASS=org.apache.iotdb.client.Client
CLASSPATH=""
......
......@@ -28,7 +28,7 @@ pushd %~dp0..
if NOT DEFINED IOTDB_CLI_HOME set IOTDB_CLI_HOME=%CD%
popd
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cli.tool.ExportCsv
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ExportCsv
if NOT DEFINED JAVA_HOME goto :err
@REM -----------------------------------------------------------------------------
......
......@@ -47,7 +47,7 @@ for f in ${IOTDB_CLI_HOME}/lib/*.jar; do
CLASSPATH=${CLASSPATH}":"$f
done
MAIN_CLASS=org.apache.iotdb.cli.tool.ExportCsv
MAIN_CLASS=org.apache.iotdb.tool.ExportCsv
"$JAVA" -DIOTDB_CLI_HOME=${IOTDB_CLI_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@"
exit $?
\ No newline at end of file
......@@ -28,7 +28,7 @@ pushd %~dp0..
if NOT DEFINED IOTDB_CLI_HOME set IOTDB_CLI_HOME=%CD%
popd
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cli.tool.ImportCsv
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ImportCsv
if NOT DEFINED JAVA_HOME goto :err
@REM -----------------------------------------------------------------------------
......
......@@ -47,7 +47,7 @@ for f in ${IOTDB_CLI_HOME}/lib/*.jar; do
CLASSPATH=${CLASSPATH}":"$f
done
MAIN_CLASS=org.apache.iotdb.cli.tool.ImportCsv
MAIN_CLASS=org.apache.iotdb.tool.ImportCsv
"$JAVA" -DIOTDB_CLI_HOME=${IOTDB_CLI_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@"
exit $?
\ No newline at end of file
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import java.io.PrintStream;
import java.sql.ResultSet;
......@@ -38,65 +38,64 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.cli.tool.ImportCsv;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.tool.ImportCsv;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.iotdb.jdbc.IoTDBDatabaseMetadata;
import org.apache.iotdb.jdbc.IoTDBMetadataResultSet;
import org.apache.iotdb.jdbc.IoTDBQueryResultSet;
import org.apache.iotdb.jdbc.IoTDBSQLException;
import org.apache.iotdb.service.rpc.thrift.ServerProperties;
import org.apache.thrift.TException;
public abstract class AbstractClient {
protected static final String HOST_ARGS = "h";
protected static final String HOST_NAME = "host";
protected static final String HELP_ARGS = "help";
protected static final String PORT_ARGS = "p";
protected static final String PORT_NAME = "port";
protected static final String PASSWORD_ARGS = "pw";
protected static final String PASSWORD_NAME = "password";
protected static final String USERNAME_ARGS = "u";
protected static final String USERNAME_NAME = "username";
protected static final String EXECUTE_ARGS = "e";
protected static final String EXECUTE_NAME = "execute";
protected static final String ISO8601_ARGS = "disableISO8601";
protected static final List<String> AGGREGRATE_TIME_LIST = new ArrayList<>();
protected static final String MAX_PRINT_ROW_COUNT_ARGS = "maxPRC";
protected static final String MAX_PRINT_ROW_COUNT_NAME = "maxPrintRowCount";
protected static final String SET_MAX_DISPLAY_NUM = "set max_display_num";
protected static final String SET_TIMESTAMP_DISPLAY = "set time_display_type";
protected static final String SHOW_TIMESTAMP_DISPLAY = "show time_display_type";
protected static final String SET_TIME_ZONE = "set time_zone";
protected static final String SHOW_TIMEZONE = "show time_zone";
protected static final String SET_FETCH_SIZE = "set fetch_size";
protected static final String SHOW_FETCH_SIZE = "show fetch_size";
protected static final String HELP = "help";
protected static final String IOTDB_CLI_PREFIX = "IoTDB";
protected static final String SCRIPT_HINT = "./start-client.sh(start-client.bat if Windows)";
protected static final String QUIT_COMMAND = "quit";
protected static final String EXIT_COMMAND = "exit";
protected static final String SHOW_METADATA_COMMAND = "show timeseries";
protected static final int MAX_HELP_CONSOLE_WIDTH = 88;
protected static final String TIMESTAMP_STR = "Time";
protected static final int ISO_DATETIME_LEN = 35;
protected static final String IMPORT_CMD = "import";
static final String HOST_ARGS = "h";
static final String HOST_NAME = "host";
static final String HELP_ARGS = "help";
static final String PORT_ARGS = "p";
static final String PORT_NAME = "port";
static final String PASSWORD_ARGS = "pw";
private static final String PASSWORD_NAME = "password";
static final String USERNAME_ARGS = "u";
static final String USERNAME_NAME = "username";
private static final String EXECUTE_ARGS = "e";
private static final String EXECUTE_NAME = "execute";
static final String ISO8601_ARGS = "disableISO8601";
static final List<String> AGGREGRATE_TIME_LIST = new ArrayList<>();
static final String MAX_PRINT_ROW_COUNT_ARGS = "maxPRC";
private static final String MAX_PRINT_ROW_COUNT_NAME = "maxPrintRowCount";
static final String SET_MAX_DISPLAY_NUM = "set max_display_num";
static final String SET_TIMESTAMP_DISPLAY = "set time_display_type";
static final String SHOW_TIMESTAMP_DISPLAY = "show time_display_type";
static final String SET_TIME_ZONE = "set time_zone";
static final String SHOW_TIMEZONE = "show time_zone";
static final String SET_FETCH_SIZE = "set fetch_size";
static final String SHOW_FETCH_SIZE = "show fetch_size";
private static final String HELP = "help";
static final String IOTDB_CLI_PREFIX = "IoTDB";
static final String SCRIPT_HINT = "./start-client.sh(start-client.bat if Windows)";
static final String QUIT_COMMAND = "quit";
static final String EXIT_COMMAND = "exit";
static final String SHOW_METADATA_COMMAND = "show timeseries";
static final int MAX_HELP_CONSOLE_WIDTH = 88;
static final String TIMESTAMP_STR = "Time";
static final int ISO_DATETIME_LEN = 35;
private static final String IMPORT_CMD = "import";
private static final String NEED_NOT_TO_PRINT_TIMESTAMP = "AGGREGATION";
private static final String DEFAULT_TIME_FORMAT = "default";
protected static String timeFormat = DEFAULT_TIME_FORMAT;
protected static int maxPrintRowCount = 1000;
protected static int fetchSize = 10000;
protected static int maxTimeLength = ISO_DATETIME_LEN;
protected static int maxValueLength = 15;
protected static boolean isQuit = false;
protected static String TIMESTAMP_PRECISION = "ms";
private static String timeFormat = DEFAULT_TIME_FORMAT;
static int maxPrintRowCount = 1000;
private static int fetchSize = 10000;
static int maxTimeLength = ISO_DATETIME_LEN;
static int maxValueLength = 15;
private static boolean isQuit = false;
static String TIMESTAMP_PRECISION = "ms";
/**
* control the width of columns for 'show timeseries path' and 'show storage group'.
......@@ -138,22 +137,22 @@ public abstract class AbstractClient {
* </table>
* </p>
*/
protected static int[] maxValueLengthForShow = new int[]{75, 45, 8, 8};
protected static String formatTime = "%" + maxTimeLength + "s|";
protected static String formatValue = "%" + maxValueLength + "s|";
private static int[] maxValueLengthForShow = new int[]{75, 45, 8, 8};
static String formatTime = "%" + maxTimeLength + "s|";
private static String formatValue = "%" + maxValueLength + "s|";
private static final int DIVIDING_LINE_LENGTH = 40;
protected static String host = "127.0.0.1";
protected static String port = "6667";
protected static String username;
protected static String password;
protected static String execute;
protected static boolean hasExecuteSQL = false;
static String host = "127.0.0.1";
static String port = "6667";
static String username;
static String password;
static String execute;
static boolean hasExecuteSQL = false;
protected static boolean printToConsole = true;
private static boolean printToConsole = true;
protected static Set<String> keywordSet = new HashSet<>();
static Set<String> keywordSet = new HashSet<>();
protected static ServerProperties properties = null;
static ServerProperties properties = null;
private static boolean printHeader = false;
private static int displayCnt = 0;
......@@ -165,7 +164,7 @@ public abstract class AbstractClient {
*/
private static boolean showException = false;
protected static void init() {
static void init() {
keywordSet.add("-" + HOST_ARGS);
keywordSet.add("-" + HELP_ARGS);
keywordSet.add("-" + PORT_ARGS);
......@@ -177,14 +176,14 @@ public abstract class AbstractClient {
}
/**
* CLI result output.
* Client result output.
*
* @param res result set
* @param printToConsole print to console
* @param zoneId time-zone ID
* @throws SQLException SQLException
*/
public static void output(ResultSet res, boolean printToConsole, ZoneId zoneId)
private static void output(ResultSet res, boolean printToConsole, ZoneId zoneId)
throws SQLException {
int cnt = 0;
boolean printTimestamp = true;
......@@ -225,15 +224,14 @@ public abstract class AbstractClient {
}
}
println(StringUtils.repeat('-', DIVIDING_LINE_LENGTH));
printCount(isShow, res, cnt);
}
protected static String getTimestampPrecision() {
private static String getTimestampPrecision() {
return TIMESTAMP_PRECISION;
}
protected static void printCount(boolean isShow, ResultSet res, int cnt) throws SQLException {
private static void printCount(boolean isShow, ResultSet res, int cnt) throws SQLException {
if (isShow) {
int type = res.getType();
if (type == IoTDBMetadataResultSet.MetadataType.STORAGE_GROUP.ordinal()) { // storage group
......@@ -247,7 +245,7 @@ public abstract class AbstractClient {
}
}
protected static void printRow(boolean printTimestamp, int colCount,
private static void printRow(boolean printTimestamp, int colCount,
ResultSetMetaData resultSetMetaData, boolean isShow, ResultSet res, ZoneId zoneId)
throws SQLException {
// Output Labels
......@@ -263,7 +261,7 @@ public abstract class AbstractClient {
}
}
protected static void printHeader(boolean printTimestamp, int colCount,
private static void printHeader(boolean printTimestamp, int colCount,
ResultSetMetaData resultSetMetaData, boolean isShow) throws SQLException {
if (!printHeader) {
printBlockLine(printTimestamp, colCount, resultSetMetaData, isShow);
......@@ -273,7 +271,7 @@ public abstract class AbstractClient {
}
}
protected static void printShow(int colCount, ResultSet res) throws SQLException {
private static void printShow(int colCount, ResultSet res) throws SQLException {
print("|");
for (int i = 1; i <= colCount; i++) {
formatValue = "%" + maxValueLengthForShow[i - 1] + "s|";
......@@ -282,7 +280,7 @@ public abstract class AbstractClient {
println();
}
protected static void printRowData(boolean printTimestamp, ResultSet res, ZoneId zoneId,
private static void printRowData(boolean printTimestamp, ResultSet res, ZoneId zoneId,
ResultSetMetaData resultSetMetaData, int colCount)
throws SQLException {
if (displayCnt < maxPrintRowCount) { // NOTE displayCnt only works on queried data results
......@@ -298,7 +296,7 @@ public abstract class AbstractClient {
}
}
protected static void printColumnData(ResultSetMetaData resultSetMetaData, ResultSet res, int i,
private static void printColumnData(ResultSetMetaData resultSetMetaData, ResultSet res, int i,
ZoneId zoneId) throws SQLException {
boolean flag = false;
for (String timeStr : AGGREGRATE_TIME_LIST) {
......@@ -319,7 +317,7 @@ public abstract class AbstractClient {
}
}
protected static Options createOptions() {
static Options createOptions() {
Options options = new Options();
Option help = new Option(HELP_ARGS, false, "Display help information(optional)");
help.setRequired(false);
......@@ -420,7 +418,7 @@ public abstract class AbstractClient {
}
}
protected static String checkRequiredArg(String arg, String name, CommandLine commandLine,
static String checkRequiredArg(String arg, String name, CommandLine commandLine,
boolean isRequired,
String defaultValue) throws ArgsErrorException {
String str = commandLine.getOptionValue(arg);
......@@ -442,7 +440,7 @@ public abstract class AbstractClient {
return str;
}
protected static void setTimeFormat(String newTimeFormat) {
static void setTimeFormat(String newTimeFormat) {
switch (newTimeFormat.trim().toLowerCase()) {
case "long":
case "number":
......@@ -475,7 +473,7 @@ public abstract class AbstractClient {
}
}
protected static void setMaxDisplayNumber(String maxDisplayNum) {
static void setMaxDisplayNumber(String maxDisplayNum) {
long tmp = Long.parseLong(maxDisplayNum.trim());
if (tmp > Integer.MAX_VALUE || tmp < 0) {
maxPrintRowCount = Integer.MAX_VALUE;
......@@ -484,7 +482,7 @@ public abstract class AbstractClient {
}
}
protected static void printBlockLine(boolean printTimestamp, int colCount,
private static void printBlockLine(boolean printTimestamp, int colCount,
ResultSetMetaData resultSetMetaData,
boolean isShowTs) throws SQLException {
StringBuilder blockLine = new StringBuilder();
......@@ -512,7 +510,7 @@ public abstract class AbstractClient {
println(blockLine);
}
protected static void printName(boolean printTimestamp, int colCount,
private static void printName(boolean printTimestamp, int colCount,
ResultSetMetaData resultSetMetaData,
boolean isShowTs) throws SQLException {
print("|");
......@@ -533,7 +531,7 @@ public abstract class AbstractClient {
println();
}
protected static String[] removePasswordArgs(String[] args) {
static String[] removePasswordArgs(String[] args) {
int index = -1;
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-" + PASSWORD_ARGS)) {
......@@ -548,7 +546,7 @@ public abstract class AbstractClient {
return args;
}
protected static String[] processExecuteArgs(String[] args) {
static String[] processExecuteArgs(String[] args) {
int index = -1;
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-" + EXECUTE_ARGS)) {
......@@ -574,7 +572,7 @@ public abstract class AbstractClient {
}
}
protected static void displayLogo(String version) {
static void displayLogo(String version) {
println(" _____ _________ ______ ______ \n"
+ "|_ _| | _ _ ||_ _ `.|_ _ \\ \n"
+ " | | .--.|_/ | | \\_| | | `. \\ | |_) | \n"
......@@ -584,13 +582,13 @@ public abstract class AbstractClient {
+ " \n");
}
protected static void echoStarting(){
static void echoStarting(){
println("---------------------");
println("Starting IoTDB Client");
println("---------------------");
}
protected static OperationResult handleInputCmd(String cmd, IoTDBConnection connection) {
static OperationResult handleInputCmd(String cmd, IoTDBConnection connection) {
String specialCmd = cmd.toLowerCase().trim();
if (QUIT_COMMAND.equals(specialCmd) || EXIT_COMMAND.equals(specialCmd)) {
......@@ -647,7 +645,7 @@ public abstract class AbstractClient {
return OperationResult.NO_OPER;
}
protected static void showHelp() {
private static void showHelp() {
println(" <your-sql>\t\t\t execute your sql statement");
println(String.format(" %s\t\t show how many timeseries are in iotdb",
SHOW_METADATA_COMMAND));
......@@ -665,7 +663,7 @@ public abstract class AbstractClient {
SET_MAX_DISPLAY_NUM));
}
protected static void showMetaData(IoTDBConnection connection) {
private static void showMetaData(IoTDBConnection connection) {
try {
println(((IoTDBDatabaseMetadata) connection.getMetaData()).getMetadataInJson());
} catch (SQLException e) {
......@@ -674,7 +672,7 @@ public abstract class AbstractClient {
}
}
protected static void setTimestampDisplay(String specialCmd, String cmd) {
private static void setTimestampDisplay(String specialCmd, String cmd) {
String[] values = specialCmd.split("=");
if (values.length != 2) {
println(String.format("Time display format error, please input like %s=ISO8601",
......@@ -691,7 +689,7 @@ public abstract class AbstractClient {
println("Time display type has set to " + cmd.split("=")[1].trim());
}
protected static void setTimeZone(String specialCmd, String cmd, IoTDBConnection connection) {
private static void setTimeZone(String specialCmd, String cmd, IoTDBConnection connection) {
String[] values = specialCmd.split("=");
if (values.length != 2) {
println(
......@@ -708,7 +706,7 @@ public abstract class AbstractClient {
println("Time zone has set to " + values[1].trim());
}
protected static void setFetchSize(String specialCmd, String cmd) {
private static void setFetchSize(String specialCmd, String cmd) {
String[] values = specialCmd.split("=");
if (values.length != 2) {
println(String
......@@ -725,7 +723,7 @@ public abstract class AbstractClient {
println("Fetch size has set to " + values[1].trim());
}
protected static void setMaxDisplaNum(String specialCmd, String cmd) {
private static void setMaxDisplaNum(String specialCmd, String cmd) {
String[] values = specialCmd.split("=");
if (values.length != 2) {
println(String.format("Max display number format error, please input like %s = 10000",
......@@ -742,7 +740,7 @@ public abstract class AbstractClient {
println("Max display number has set to " + values[1].trim());
}
protected static void showTimeZone(IoTDBConnection connection) {
private static void showTimeZone(IoTDBConnection connection) {
try {
println("Current time zone: " + connection.getTimeZone());
} catch (Exception e) {
......@@ -751,7 +749,7 @@ public abstract class AbstractClient {
}
}
protected static void importCmd(String specialCmd, String cmd, IoTDBConnection connection) {
private static void importCmd(String specialCmd, String cmd, IoTDBConnection connection) {
String[] values = specialCmd.split(" ");
if (values.length != 2) {
println("Please input like: import /User/myfile. "
......@@ -772,12 +770,10 @@ public abstract class AbstractClient {
}
}
protected static void executeQuery(IoTDBConnection connection, String cmd) {
Statement statement = null;
private static void executeQuery(IoTDBConnection connection, String cmd) {
long startTime = System.currentTimeMillis();
try {
try (Statement statement = connection.createStatement();) {
ZoneId zoneId = ZoneId.of(connection.getTimeZone());
statement = connection.createStatement();
statement.setFetchSize(fetchSize);
boolean hasResultSet = statement.execute(cmd.trim());
if (hasResultSet) {
......@@ -790,15 +786,6 @@ public abstract class AbstractClient {
} catch (Exception e) {
println("Msg: " + e.getMessage());
handleException(e);
} finally {
if (statement != null) {
try {
statement.close();
} catch (SQLException e) {
println("Cannot close statement because: " + e.getMessage());
handleException(e);
}
}
}
long costTime = System.currentTimeMillis() - startTime;
println(String.format("It costs %.3fs", costTime / 1000.0));
......@@ -808,27 +795,27 @@ public abstract class AbstractClient {
STOP_OPER, CONTINUE_OPER, NO_OPER
}
protected static void printf(String format, Object... args) {
private static void printf(String format, Object... args) {
SCREEN_PRINTER.printf(format, args);
}
protected static void print(String msg) {
static void print(String msg) {
SCREEN_PRINTER.print(msg);
}
protected static void println() {
private static void println() {
SCREEN_PRINTER.println();
}
protected static void println(String msg) {
static void println(String msg) {
SCREEN_PRINTER.println(msg);
}
protected static void println(Object obj) {
private static void println(Object obj) {
SCREEN_PRINTER.println(obj);
}
protected static void handleException(Exception e) {
static void handleException(Exception e) {
if (showException) {
e.printStackTrace(SCREEN_PRINTER);
}
......
......@@ -16,12 +16,11 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import java.io.IOException;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import jline.console.ConsoleReader;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
......@@ -29,17 +28,20 @@ import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.Config;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.thrift.TException;
/**
* usage: -h 127.0.0.1 -p 6667 -u root -pw root
*/
public class Client extends AbstractClient {
private static CommandLine commandLine;
/**
* IoTDB CLI main function.
* IoTDB Client main function.
*
* @param args launch arguments
* @throws ClassNotFoundException ClassNotFoundException
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import java.io.Console;
import java.sql.DriverManager;
......@@ -28,7 +28,7 @@ import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.Config;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.thrift.TException;
......@@ -71,7 +71,7 @@ public class WinClient extends AbstractClient {
private static String readPassword() {
Console c = System.console();
if (c == null) { // IN ECLIPSE IDE
if (c == null) { // IN ECLIENTPSE IDE
print(IOTDB_CLI_PREFIX + "> please input password: ");
Scanner scanner = new Scanner(System.in);
return scanner.nextLine();
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.exception;
package org.apache.iotdb.exception;
public class ArgsErrorException extends Exception {
......
......@@ -16,13 +16,13 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.tool;
package org.apache.iotdb.tool;
import java.io.IOException;
import java.time.ZoneId;
import jline.console.ConsoleReader;
import org.apache.commons.cli.CommandLine;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.iotdb.jdbc.IoTDBSQLException;
import org.apache.thrift.TException;
......
......@@ -17,7 +17,7 @@
* under the License.
*/
package org.apache.iotdb.cli.tool;
package org.apache.iotdb.tool;
import java.io.BufferedReader;
import java.io.BufferedWriter;
......@@ -41,8 +41,8 @@ import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.iotdb.cli.client.AbstractClient;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.client.AbstractClient;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.Config;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.thrift.TException;
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.tool;
package org.apache.iotdb.tool;
import java.io.BufferedReader;
import java.io.BufferedWriter;
......@@ -46,13 +46,11 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.io.FileUtils;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.Config;
import org.apache.iotdb.jdbc.Constant;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* read a CSV formatted data File and insert all the data into IoTDB.
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
......@@ -27,8 +27,8 @@ import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.iotdb.cli.client.AbstractClient.OperationResult;
import org.apache.iotdb.cli.exception.ArgsErrorException;
import org.apache.iotdb.client.AbstractClient.OperationResult;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.iotdb.jdbc.IoTDBDatabaseMetadata;
import org.junit.After;
......@@ -143,30 +143,46 @@ public class AbstractClientIT {
@Test
public void testHandleInputInputCmd() {
assertEquals(OperationResult.STOP_OPER, AbstractClient.handleInputCmd(AbstractClient.EXIT_COMMAND, connection));
assertEquals(OperationResult.STOP_OPER, AbstractClient.handleInputCmd(AbstractClient.QUIT_COMMAND, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(AbstractClient.SHOW_METADATA_COMMAND, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=xxx", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=default", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
assertEquals(OperationResult.STOP_OPER, AbstractClient
.handleInputCmd(AbstractClient.EXIT_COMMAND, connection));
assertEquals(OperationResult.STOP_OPER, AbstractClient
.handleInputCmd(AbstractClient.QUIT_COMMAND, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(AbstractClient.SHOW_METADATA_COMMAND, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=xxx", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=default", AbstractClient.SET_TIMESTAMP_DISPLAY), connection));
testSetTimeFormat();
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=", AbstractClient.SET_MAX_DISPLAY_NUM), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=xxx", AbstractClient.SET_MAX_DISPLAY_NUM),connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=1", AbstractClient.SET_MAX_DISPLAY_NUM), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=", AbstractClient.SET_MAX_DISPLAY_NUM), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=xxx", AbstractClient.SET_MAX_DISPLAY_NUM),connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=1", AbstractClient.SET_MAX_DISPLAY_NUM), connection));
testSetMaxDisplayNumber();
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(AbstractClient.SHOW_TIMEZONE, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(AbstractClient.SHOW_TIMESTAMP_DISPLAY, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(AbstractClient.SHOW_FETCH_SIZE, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=", AbstractClient.SET_TIME_ZONE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=+08:00", AbstractClient.SET_TIME_ZONE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=", AbstractClient.SET_FETCH_SIZE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient.handleInputCmd(String.format("%s=111", AbstractClient.SET_FETCH_SIZE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(AbstractClient.SHOW_TIMEZONE, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(AbstractClient.SHOW_TIMESTAMP_DISPLAY, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(AbstractClient.SHOW_FETCH_SIZE, connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=", AbstractClient.SET_TIME_ZONE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=+08:00", AbstractClient.SET_TIME_ZONE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=", AbstractClient.SET_FETCH_SIZE), connection));
assertEquals(OperationResult.CONTINUE_OPER, AbstractClient
.handleInputCmd(String.format("%s=111", AbstractClient.SET_FETCH_SIZE), connection));
}
private void testSetTimeFormat() {
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import static org.junit.Assert.assertEquals;
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.client;
package org.apache.iotdb.client;
import java.io.File;
import java.io.IOException;
......
......@@ -16,11 +16,11 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.tool;
package org.apache.iotdb.tool;
import java.io.File;
import java.io.IOException;
import org.apache.iotdb.cli.client.AbstractScript;
import org.apache.iotdb.client.AbstractScript;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
......
......@@ -16,11 +16,11 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.cli.tool;
package org.apache.iotdb.tool;
import java.io.File;
import java.io.IOException;
import org.apache.iotdb.cli.client.AbstractScript;
import org.apache.iotdb.client.AbstractScript;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
......
......@@ -74,8 +74,8 @@ you can just choose menu "import" -> "Maven" -> "Existing Maven Projects".
## Debugging IoTDB
The main class of IoTDB server is `org.apache.iotdb.db.service.IoTDB`.
The main class of IoTDB client is `org.apache.iotdb.cli.client.Client`
(or `org.apache.iotdb.cli.client.WinClient` on Win OS).
The main class of IoTDB cli is `org.apache.iotdb.client.Client`
(or `org.apache.iotdb.client.WinClient` on Win OS).
You can run/debug IoTDB by using these two classes as the entry points.
......
......@@ -88,12 +88,12 @@ IoTDB为用户提供多种与服务器交互的方式,您可以选择使用Cli
The startup commands for Linux and MacOS are as follows:
```
> $IOTDB_HOME/sbin/start-client.sh -h 127.0.0.1 -p 6667 -u root
> $IOTDB_HOME/sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root
```
The startup command for Windows is as follows:
```
> $IOTDB_HOME\sbin\start-client.bat -h 127.0.0.1 -p 6667 -u root
> $IOTDB_HOME\sbin\start-cli.bat -h 127.0.0.1 -p 6667 -u root
```
After pressing Enter, type the password of the root user to start the client. A prompt like the one shown in the figure indicates that the startup succeeded.
......@@ -286,7 +286,7 @@ IoTDB> SELECT * FROM root.ln.wf01.wt01
+-----------------------+--------------------------+-----------------------------+
```
Type quit or exit to leave the Client and end the session; the Client prints quit normally to indicate a successful exit. The statement and the returned result are as follows:
Type quit or exit to leave the Cli and end the session; the Cli prints quit normally to indicate a successful exit. The statement and the returned result are as follows:
```
IoTDB> quit
quit normally
......
......@@ -57,7 +57,7 @@
To ensure that the monitoring data of the robotic arms can be monitored and analyzed in a timely manner, the company needs to collect the sensor data of these robotic arms, send it to a server that can connect to the external network, and then upload the raw data to the data center for complex computation and analysis.
In this case, IoTDB, the IoTDB-CLI tool, the TsFileSync tool, and the Hadoop/Spark integration components of the IoTDB suite can be used. The IoTDB server is installed on a factory server connected to the external network to receive the data transmitted by the robotic arms and upload it to the data center. The IoTDB-CLI tool is installed on every robotic arm connected to the factory intranet to upload the real-time sensor data to the factory's internal server, and the TsFileSync tool is then used to upload the raw data to the data center. In addition, Hadoop/Spark clusters need to be deployed on the data center side for data storage and analysis, as shown in the middle scenario of Figure 1.6.
In this case, IoTDB, the IoTDB-Client tool, the TsFileSync tool, and the Hadoop/Spark integration components of the IoTDB suite can be used. The IoTDB server is installed on a factory server connected to the external network to receive the data transmitted by the robotic arms and upload it to the data center. The IoTDB-Client tool is installed on every robotic arm connected to the factory intranet to upload the real-time sensor data to the factory's internal server, and the TsFileSync tool is then used to upload the raw data to the data center. In addition, Hadoop/Spark clusters need to be deployed on the data center side for data storage and analysis, as shown in the middle scenario of Figure 1.6.
<img style="width:100%; max-width:800px; max-height:600px; margin-left:auto; margin-right:auto; display:block;" src="https://user-images.githubusercontent.com/13203019/51579080-96aba780-1efa-11e9-87ac-940c45b19dd7.jpg">
......@@ -71,7 +71,7 @@
To receive the IoT data collected by the car sensors in real time, the company needs to send the sensor data to the data center in real time over the narrowband IoT while the vehicles are running, and then perform complex computation and analysis on the servers in the data center.
In this case, IoTDB, IoTDB-CLI, and the Hadoop/Spark integration components of the IoTDB suite can be used. The IoTDB-CLI tool is installed on every connected vehicle, and the IoTDB-JDBC tool is used to send the data directly back to the servers in the data center.
In this case, IoTDB, IoTDB-Client, and the Hadoop/Spark integration components of the IoTDB suite can be used. The IoTDB-Client tool is installed on every connected vehicle, and the IoTDB-JDBC tool is used to send the data directly back to the servers in the data center.
In addition, Hadoop/Spark clusters need to be deployed on the data center side for data storage and analysis, as shown in Figure 1.8.
......
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Chapter6: JDBC API
## Status Code
The concept of **status codes** is introduced in the latest version. For example, since IoTDB requires that a time series be registered before data is written to it, one possible solution is:
```
try {
writeData();
} catch (SQLException e) {
// in most cases the time series does not exist
if (e.getMessage().contains("exist")) {
//However, using the content of the error message is not so efficient
registerTimeSeries();
//write data once again
writeData();
}
}
```
With status codes, we no longer need to write code such as `if (e.getErrorMessage().contains("exist"))`; we only need to check `e.getStatusType().getCode() == TSStatusType.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()`.
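A minimal sketch of this pattern, reusing the hypothetical `writeData()` and `registerTimeSeries()` helpers from the example above and assuming the caught exception (for example an `IoTDBSQLException`) exposes the `getStatusType()` accessor described here:
```
try {
  writeData();
} catch (IoTDBSQLException e) {
  // assumption: the exception exposes getStatusType() as described above;
  // compare status codes instead of matching on the message text
  if (e.getStatusType().getCode()
      == TSStatusType.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()) {
    registerTimeSeries();
    // write data once again
    writeData();
  }
}
```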
Here is a list of status codes and the corresponding messages:
|Status Code|Status Type|Status Message|
|:---|:---|:---|
|200|SUCCESS_STATUS||
|201|STILL_EXECUTING_STATUS||
|202|INVALID_HANDLE_STATUS||
|301|TIMESERIES_NOT_EXIST_ERROR|Timeseries does not exist|
|302|UNSUPPORTED_FETCH_METADATA_OPERATION_ERROR|Unsupported fetch metadata operation|
|303|FETCH_METADATA_ERROR|Failed to fetch metadata|
|304|CHECK_FILE_LEVEL_ERROR|Meet error while checking file level|
|400|EXECUTE_STATEMENT_ERROR|Execute statement error|
|401|SQL_PARSE_ERROR|Meet error while parsing SQL|
|402|GENERATE_TIME_ZONE_ERROR|Meet error while generating time zone|
|403|SET_TIME_ZONE_ERROR|Meet error while setting time zone|
|500|INTERNAL_SERVER_ERROR|Internal server error|
|600|WRONG_LOGIN_PASSWORD_ERROR|Username or password is wrong|
|601|NOT_LOGIN_ERROR|Has not logged in|
|602|NO_PERMISSION_ERROR|No permissions for this operation|
|603|UNINITIALIZED_AUTH_ERROR|Uninitialized authorizer|
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Chapter 7: Session API
# Usage
## Dependencies
* JDK >= 1.8
* Maven >= 3.0
## Install to the local maven repository
In root directory:
> mvn clean install -pl session -am -Dmaven.test.skip=true
## Using the Session interface with Maven
```
<dependencies>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-session</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
```
## Example of using the Session interface
```Java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import org.apache.iotdb.session.Session;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.record.RowBatch;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
public class SessionExample {
public static void main(String[] args) throws ClassNotFoundException, IoTDBSessionException {
Session session = new Session("127.0.0.1", 6667, "root", "root");
session.open();
session.setStorageGroup("root.sg1");
session.createTimeseriesResp("root.sg1.d1.s1", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseriesResp("root.sg1.d1.s2", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseriesResp("root.sg1.d1.s3", TSDataType.INT64, TSEncoding.RLE);
Schema schema = new Schema();
schema.registerMeasurement(new MeasurementSchema("s1", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s2", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s3", TSDataType.INT64, TSEncoding.RLE));
RowBatch rowBatch = schema.createRowBatch("root.sg1.d1", 100);
long[] timestamps = rowBatch.timestamps;
Object[] values = rowBatch.values;
for (long time = 0; time < 30000; time++) {
int row = rowBatch.batchSize++;
timestamps[row] = time;
for (int i = 0; i < 3; i++) {
long[] sensor = (long[]) values[i];
sensor[row] = time;
}
if (rowBatch.batchSize == rowBatch.getMaxBatchSize()) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
}
if (rowBatch.batchSize != 0) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
session.close();
}
}
```
> The code is in example/session/src/main/java/org/apache/iotdb/session/SessionExample.java
\ No newline at end of file
......@@ -19,6 +19,6 @@
-->
# Chapter 7: TsFile
# Chapter 8: TsFile
Coming Soon.
\ No newline at end of file
......@@ -19,6 +19,6 @@
-->
# Chapter 7: TsFile
# Chapter 8: TsFile
Coming Soon.
\ No newline at end of file
......@@ -19,6 +19,6 @@
-->
# Chapter 7: TsFile
# Chapter 8: TsFile
Coming Soon.
\ No newline at end of file
......@@ -19,7 +19,7 @@
-->
# Sync Tool
# Chapter 9: Sync Tool
<!-- TOC -->
- [Sync Tool](#同步工具)
......
......@@ -29,7 +29,7 @@
<!-- /TOC -->
# Cli / Shell Tool
IoTDB provides users with the CLI/Shell tool for starting the client and server programs. The following describes how to run each CLI/Shell tool and its related parameters.
IoTDB provides users with the Client/Shell tool for starting the client and server programs. The following describes how to run each Client/Shell tool and its related parameters.
> \$IOTDB\_HOME indicates the path of the IoTDB installation directory.
## How to run Cli / Shell
......@@ -38,12 +38,12 @@ IOTDB为用户提供CLI/Shell工具用于启动客户端和服务端程序。下
The startup commands for Linux and MacOS are as follows:
```
Shell > ./sbin/start-client.sh -h 127.0.0.1 -p 6667 -u root -pw root
Shell > ./sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root
```
The startup command for Windows is as follows:
```
Shell > \sbin\start-client.bat -h 127.0.0.1 -p 6667 -u root -pw root
Shell > \sbin\start-cli.bat -h 127.0.0.1 -p 6667 -u root -pw root
```
Press Enter to start the client. A prompt like the one shown below indicates that the startup succeeded.
```
......@@ -79,22 +79,22 @@ IoTDB>
The startup commands for Linux and MacOS are as follows:
```
Shell > ./sbin/start-client.sh -h 10.129.187.21 -p 6667 -u root -pw root -disableISO8601 -maxPRC 10
Shell > ./sbin/start-cli.sh -h 10.129.187.21 -p 6667 -u root -pw root -disableISO8601 -maxPRC 10
```
The startup command for Windows is as follows:
```
Shell > \sbin\start-client.bat -h 10.129.187.21 -p 6667 -u root -pw root -disableISO8601 -maxPRC 10
Shell > \sbin\start-cli.bat -h 10.129.187.21 -p 6667 -u root -pw root -disableISO8601 -maxPRC 10
```
## The -e parameter of Cli / Shell
When you want to perform batch operations on IoTDB through the Cli / Shell from a script, you can use the -e parameter. With this parameter, you can operate IoTDB without entering the client's interactive input mode.
To avoid mixing the SQL statement with other parameters, -e is currently only supported as the last parameter.
The -e parameter of the CLI/Shell tool is used as follows:
The -e parameter of the Client/Shell tool is used as follows:
```
Shell > ./sbin/start-client.sh -h {host} -p {port} -u {user} -pw {password} -e {sql for iotdb}
Shell > ./sbin/start-cli.sh -h {host} -p {port} -u {user} -pw {password} -e {sql for iotdb}
```
To better explain the use of the -e parameter, consider the following example.
......@@ -109,7 +109,7 @@ Windows系统启动命令如下:
4. Query to verify that the data was inserted successfully
Then, using the -e parameter of the CLI/Shell tool, this can be done with the following script:
Then, using the -e parameter of the Client/Shell tool, this can be done with the following script:
```
# !/bin/bash
......@@ -119,12 +119,12 @@ port=6667
user=root
pass=root
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "set storage group to root.demo"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "create timeseries root.demo.s1 WITH DATATYPE=INT32, ENCODING=RLE"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(1,10)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(2,11)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(3,12)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "select s1 from root.demo"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "set storage group to root.demo"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "create timeseries root.demo.s1 WITH DATATYPE=INT32, ENCODING=RLE"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(1,10)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(2,11)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(3,12)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "select s1 from root.demo"
```
The printed results are shown in the figure below; operations performed this way produce the same results as the client's interactive input mode and as operations through JDBC.
......
......@@ -31,7 +31,7 @@
- Can I use Hadoop and Spark to read TsFile in IoTDB?
- How does IoTDB handle duplicate points?
- How can I tell what type of the specific timeseries?
- How can I change IoTDB's CLI time display format?
- How can I change IoTDB's Client time display format?
<!-- /TOC -->
# Frequently Asked Questions
......@@ -43,19 +43,19 @@ There are several ways to identify the version of IoTDB that you are using:
* Launch IoTDB's Command Line Interface:
```
> ./start-client.sh -p 6667 -pw root -u root -h localhost
> ./start-cli.sh -p 6667 -pw root -u root -h localhost
_____ _________ ______ ______
|_ _| | _ _ ||_ _ `.|_ _ \
| | .--.|_/ | | \_| | | `. \ | |_) |
| | / .'`\ \ | | | | | | | __'.
_| |_| \__. | _| |_ _| |_.' /_| |__) |
|_____|'.__.' |_____| |______.'|_______/ version 0.7.0
|_____|'.__.' |_____| |______.'|_______/ version x.x.x
```
* Check pom.xml file:
```
<version>0.7.0</version>
<version>0.9.0-SNAPSHOT</version>
```
* Use JDBC API:
......@@ -137,11 +137,11 @@ Otherwise, you can also use wildcard in timeseries path:
IoTDB> show timeseries root.fit.d1.*
```
## How can I change IoTDB's CLI time display format?
## How can I change IoTDB's Client time display format?
By default, IoTDB's CLI displays time in a human-readable format (e.g. ```1970-01-01T08:00:00.001```). If you want to display time as a raw timestamp or in another format, add the parameter ```-disableISO8601``` to the start command:
By default, IoTDB's Client displays time in a human-readable format (e.g. ```1970-01-01T08:00:00.001```). If you want to display time as a raw timestamp or in another format, add the parameter ```-disableISO8601``` to the start command:
```
> $IOTDB_CLI_HOME/sbin/start-client.sh -h 127.0.0.1 -p 6667 -u root -pw root -disableISO8601
> $IOTDB_CLI_HOME/sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root -disableISO8601
```
......@@ -30,7 +30,7 @@
- Configure
- Start
- Start Server
- Start Client
- Start Cli
- Have a try
- Stop Server
......@@ -89,8 +89,8 @@ then by default `IOTDB_HOME` is the direct parent directory of `sbin/start-serve
(or that of `sbin\start-server.bat` on Windows).
* if `IOTDB_CLI_HOME` is not explicitly assigned,
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-client.sh` on
Unix/OS X (or that of `sbin\start-client.bat` on Windows).
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-cli.sh` on
Unix/OS X (or that of `sbin\start-cli.bat` on Windows).
If this is not the first time you build IoTDB, remember to delete the following files:
......@@ -170,9 +170,9 @@ After that we start the server. Running the startup script:
> $IOTDB_HOME\sbin\start-server.bat
```
### Start Client
### Start Cli
Now let's try to read and write some data from IoTDB using our Client. To start the client, you need to specify the server's IP and PORT as well as the USER_NAME and PASSWORD.
Now let's try to read and write some data from IoTDB using our Cli. To start the client, you need to specify the server's IP and PORT as well as the USER_NAME and PASSWORD.
```
# You can first build cli project
......@@ -182,10 +182,10 @@ Now let's trying to read and write some data from IoTDB using our Client. To sta
> mvn clean package -pl client -am -Dmaven.test.skip=true
# Unix/OS X
> $IOTDB_CLI_HOME/sbin/start-client.sh -h <IP> -p <PORT> -u <USER_NAME>
> $IOTDB_CLI_HOME/sbin/start-cli.sh -h <IP> -p <PORT> -u <USER_NAME>
# Windows
> $IOTDB_CLI_HOME\sbin\start-client.bat -h <IP> -p <PORT> -u <USER_NAME>
> $IOTDB_CLI_HOME\sbin\start-cli.bat -h <IP> -p <PORT> -u <USER_NAME>
```
> NOTE: IoTDB ships with a default user named 'root', and the default password for 'root' is 'root'. You can use this default user if this is your first try or if you have not created any users yourself.
......
......@@ -49,10 +49,13 @@
* 2-Reference
# Chapter 6: JDBC API
* 1-JDBC API
# Chapter 7: TsFile
* 2-Status Code
# Chapter 7: Session API
* 1-Session API
# Chapter 8: TsFile
* 1-Installation
* 2-Usage
* 3-Hierarchy
# Chapter 8: System Tools
# Chapter 9: System Tools
* 1-Sync
* 2-Memory Estimation Tool
......@@ -61,7 +61,7 @@ In order to ensure that the data of the robot can be monitored and analyzed in t
<img style="width:100%; max-width:800px; max-height:600px; margin-left:auto; margin-right:auto; display:block;" src="https://user-images.githubusercontent.com/13203019/51579080-96aba780-1efa-11e9-87ac-940c45b19dd7.jpg">
At this point, IoTDB, IoTDB-CLI tools, TsFileSync tools, and Hadoop/Spark integration components in the IoTDB suite can be used. The IoTDB-CLI tool is installed on each robot, and each of them is connected to the LAN of the factory. When sensors generate real-time data, the data will be uploaded to the server in the factory. The IoTDB server and TsFileSync are installed on the server connected to the external network. Once triggered, the data on the server will be uploaded to the data center. In addition, Hadoop/Spark clusters need to be deployed for data storage and analysis on the data center side, as shown in Figure 1.6. Figure 1.7 shows the architecture at this time.
At this point, IoTDB, IoTDB-Client tools, TsFileSync tools, and Hadoop/Spark integration components in the IoTDB suite can be used. The IoTDB-Client tool is installed on each robot, and each of them is connected to the LAN of the factory. When sensors generate real-time data, the data will be uploaded to the server in the factory. The IoTDB server and TsFileSync are installed on the server connected to the external network. Once triggered, the data on the server will be uploaded to the data center. In addition, Hadoop/Spark clusters need to be deployed for data storage and analysis on the data center side, as shown in Figure 1.6. Figure 1.7 shows the architecture at this time.
<img style="width:100%; max-width:800px; max-height:600px; margin-left:auto; margin-right:auto; display:block;" src="https://user-images.githubusercontent.com/13203019/51579085-9dd2b580-1efa-11e9-97b9-f56bc8d342b0.jpg">
......@@ -71,7 +71,7 @@ A car company installed sensors on its cars to collect monitoring information su
In order to receive the IoT data collected by the car sensor in real time, the company needs to send the sensor data to the data center in real time through the narrowband IoT while the vehicle is running. Thus, they can perform complex calculations and analysis on the server in the data center.
At this point, IoTDB, IoTDB-CLI, and Hadoop/Spark integration components in the IoTDB suite can be used. The IoTDB-CLI tool is installed on each car and uses the IoTDB-JDBC tool to send data directly back to the server in the data center.
At this point, IoTDB, IoTDB-Client, and Hadoop/Spark integration components in the IoTDB suite can be used. The IoTDB-Client tool is installed on each car and uses the IoTDB-JDBC tool to send data directly back to the server in the data center.
In addition, Hadoop/Spark clusters need to be deployed for data storage and analysis on the data center side. As shown in Figure 1.8.
......
......@@ -28,7 +28,7 @@ This feature is not supported in version 0.7.0.
### Import Real-time Data
IoTDB provides users with a variety of ways to insert real-time data, such as directly inputting [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1) in [Cli/Shell tools](/#/Tools/Cli), or using [Java JDBC](/#/Documents/0.8.0/chap6/sec1) to perform single or batch execution of [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1).
IoTDB provides users with a variety of ways to insert real-time data, such as directly inputting [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1) in [Client/Shell tools](/#/Tools/Client), or using [Java JDBC](/#/Documents/0.8.0/chap6/sec1) to perform single or batch execution of [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1).
This section mainly introduces the use of [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1) for real-time data import in the scenario. See Section 5.1 for a detailed syntax of [INSERT SQL statement](/#/Documents/0.8.0/chap5/sec1).
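For illustration only, a minimal JDBC sketch of such a single insertion could look like the following; the storage group and timeseries (`root.demo.s1`) are hypothetical, and it assumes the standard IoTDB JDBC driver class `org.apache.iotdb.jdbc.IoTDBDriver` and a URL of the form `jdbc:iotdb://<host>:<port>/`:
```Java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class InsertExample {

  public static void main(String[] args) throws Exception {
    // assumption: the standard IoTDB JDBC driver class name
    Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
    try (Connection connection =
        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
        Statement statement = connection.createStatement()) {
      // hypothetical storage group and timeseries, used only for illustration
      statement.execute("set storage group to root.demo");
      statement.execute("create timeseries root.demo.s1 WITH DATATYPE=INT32, ENCODING=RLE");
      // a single INSERT SQL statement; see Section 5.1 for the full syntax
      statement.execute("insert into root.demo(timestamp,s1) values(1,10)");
    }
  }
}
```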
......
......@@ -104,8 +104,8 @@ then by default `IOTDB_HOME` is the direct parent directory of `sbin/start-serve
(or that of `sbin\start-server.bat` on Windows).
* if `IOTDB_CLI_HOME` is not explicitly assigned,
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-client.sh` on
Unix/OS X (or that of `sbin\start-client.bat` on Windows).
then by default `IOTDB_CLI_HOME` is the direct parent directory of `sbin/start-cli.sh` on
Unix/OS X (or that of `sbin\start-cli.bat` on Windows).
If this is not the first time you build IoTDB, remember to delete the following files:
......
......@@ -66,16 +66,16 @@ suppose the IP is <C_IP>.
4. If you just want to have a try by using iotdb-cli, you can:
```
$ docker exec -it <C_ID> /bin/bash
$ (now you have entered the container): /cli/sbin/start-client.sh -h localhost -p 6667 -u root -pw root
$ (now you have entered the container): /cli/sbin/start-cli.sh -h localhost -p 6667 -u root -pw root
```
Or, run a new docker container as the client:
```
$ docker run -it iotdb:base /cli/sbin/start-client.sh -h <C_IP> -p 6667 -u root -pw root
$ docker run -it iotdb:base /cli/sbin/start-cli.sh -h <C_IP> -p 6667 -u root -pw root
```
Or, if you have an iotdb-cli locally (e.g., you have compiled the source code by `mvn package`), and suppose your work_dir is cli/bin, then you can just run:
```
$ start-client.sh -h localhost -p 6667 -u root -pw root
$ start-cli.sh -h localhost -p 6667 -u root -pw root
```
5. If you want to write code to insert and query data, please add the following dependency:
```xml
......
......@@ -19,7 +19,7 @@
-->
# Chaper6: JDBC API
# Chapter6: JDBC API
# Usage
......@@ -57,6 +57,8 @@ This chapter provides an example of how to open a database connection, execute a
Requires that you include the packages containing the JDBC classes needed for database programming.
**NOTE: For faster insertion, the insertBatch() in Session is recommended.**
```Java
import java.sql.*;
import org.apache.iotdb.jdbc.IoTDBSQLException;
......
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Chapter6: JDBC API
## Status Code
**Status Code** is introduced in the latest version. For example, as IoTDB requires registering the time series first before writing data, a kind of solution is:
```
try {
writeData();
} catch (SQLException e) {
// in most cases the time series does not exist
if (e.getMessage().contains("exist")) {
//However, using the content of the error message is not so efficient
registerTimeSeries();
//write data once again
writeData();
}
}
```
With Status Code, instead of writing code like `if (e.getErrorMessage().contains("exist"))`, we can simply use `e.getStatusType().getCode() == TSStatusType.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()`.
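As a minimal sketch, again reusing the hypothetical `writeData()` and `registerTimeSeries()` helpers from the example above and assuming the caught exception (for example an `IoTDBSQLException`) exposes `getStatusType()` as described, the retry can be written as:
```
try {
  writeData();
} catch (IoTDBSQLException e) {
  // compare status codes instead of matching on the message text
  if (e.getStatusType().getCode()
      == TSStatusType.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()) {
    registerTimeSeries();
    // write data once again
    writeData();
  }
}
```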
Here is a list of Status Code and related message:
|Status Code|Status Type|Status Message|
|:---|:---|:---|
|200|SUCCESS_STATUS||
|201|STILL_EXECUTING_STATUS||
|202|INVALID_HANDLE_STATUS||
|301|TIMESERIES_NOT_EXIST_ERROR|Timeseries does not exist|
|302|UNSUPPORTED_FETCH_METADATA_OPERATION_ERROR|Unsupported fetch metadata operation|
|303|FETCH_METADATA_ERROR|Failed to fetch metadata|
|304|CHECK_FILE_LEVEL_ERROR|Meet error while checking file level|
|400|EXECUTE_STATEMENT_ERROR|Execute statement error|
|401|SQL_PARSE_ERROR|Meet error while parsing SQL|
|402|GENERATE_TIME_ZONE_ERROR|Meet error while generating time zone|
|403|SET_TIME_ZONE_ERROR|Meet error while setting time zone|
|500|INTERNAL_SERVER_ERROR|Internal server error|
|600|WRONG_LOGIN_PASSWORD_ERROR|Username or password is wrong|
|601|NOT_LOGIN_ERROR|Has not logged in|
|602|NO_PERMISSION_ERROR|No permissions for this operation|
|603|UNINITIALIZED_AUTH_ERROR|Uninitialized authorizer|
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Chapter7: Session API
# Usage
## Dependencies
* JDK >= 1.8
* Maven >= 3.0
## How to package only client module
In root directory:
> mvn clean package -pl client -am -Dmaven.test.skip=true
## How to install in local maven repository
In root directory:
> mvn clean install -pl client -am -Dmaven.test.skip=true
## Using IoTDB Session with Maven
```
<dependencies>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-client</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
```
## Examples with Session
This chapter provides an example of how to open an IoTDB session and execute a batch insertion.
This requires that you include the packages containing the client classes needed for database programming.
```Java
import org.apache.iotdb.session.IoTDBSessionException;
import org.apache.iotdb.session.Session;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.record.RowBatch;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
public class SessionExample {
public static void main(String[] args) throws IoTDBSessionException {
Session session = new Session("127.0.0.1", 6667, "root", "root");
session.open();
session.setStorageGroup("root.sg1");
session.createTimeseriesResp("root.sg1.d1.s1", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseriesResp("root.sg1.d1.s2", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseriesResp("root.sg1.d1.s3", TSDataType.INT64, TSEncoding.RLE);
Schema schema = new Schema();
schema.registerMeasurement(new MeasurementSchema("s1", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s2", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s3", TSDataType.INT64, TSEncoding.RLE));
RowBatch rowBatch = schema.createRowBatch("root.sg1.d1", 100);
long[] timestamps = rowBatch.timestamps;
Object[] values = rowBatch.values;
for (long time = 0; time < 30000; time++) {
int row = rowBatch.batchSize++;
timestamps[row] = time;
for (int i = 0; i < 3; i++) {
long[] sensor = (long[]) values[i];
sensor[row] = time;
}
if (rowBatch.batchSize == rowBatch.getMaxBatchSize()) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
}
if (rowBatch.batchSize != 0) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
session.close();
}
}
```
> The code is in example/session/src/main/java/org/apache/iotdb/session/SessionExample.java
\ No newline at end of file
......@@ -19,7 +19,7 @@
-->
# Chapter 7: TsFile
# Chapter 8: TsFile
## Usage
......@@ -85,12 +85,12 @@ A TsFile can be generated by following three steps and the complete code will be
```
* With pre-defined schema
```
public TsFileWriter(File file, FileSchema schema) throws IOException
public TsFileWriter(File file, Schema schema) throws IOException
```
This one is for using the HDFS file system. `TsFileOutput` can be an instance of class `HDFSOutput`.
```
public TsFileWriter(TsFileOutput output, FileSchema schema) throws IOException
public TsFileWriter(TsFileOutput output, Schema schema) throws IOException
```
**Parameters:**
......@@ -100,15 +100,15 @@ A TsFile can be generated by following three steps and the complete code will be
* Second, add measurements
Or you can make an instance of class `FileSchema` first and pass this to the constructor of class `TsFileWriter`
Or you can make an instance of class `Schema` first and pass this to the constructor of class `TsFileWriter`
The class `FileSchema` contains a map whose key is the name of one measurement schema, and the value is the schema itself.
The class `Schema` contains a map whose key is the name of one measurement schema, and the value is the schema itself.
Here are the interfaces:
```
// Create an empty FileSchema or from an existing map
public FileSchema()
public FileSchema(Map<String, MeasurementSchema> measurements)
// Create an empty Schema or from an existing map
public Schema()
public Schema(Map<String, MeasurementSchema> measurements)
// Use these two interfaces to add measurements
public void registerMeasurement(MeasurementSchema descriptor)
......@@ -259,7 +259,7 @@ import java.io.File;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.TsFileWriter;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.record.RowBatch;
/**
......@@ -275,7 +275,7 @@ public class TsFileWriteWithRowBatch {
f.delete();
}
FileSchema fileSchema = new FileSchema();
Schema schema = new Schema();
// the number of rows to include in the row batch
int rowNum = 1000000;
......@@ -284,15 +284,15 @@ public class TsFileWriteWithRowBatch {
// add measurements into file schema (all with INT64 data type)
for (int i = 0; i < sensorNum; i++) {
fileSchema.registerMeasurement(
schema.registerMeasurement(
new MeasurementSchema("sensor_" + (i + 1), TSDataType.INT64, TSEncoding.TS_2DIFF));
}
// add measurements into TSFileWriter
TsFileWriter tsFileWriter = new TsFileWriter(f, fileSchema);
TsFileWriter tsFileWriter = new TsFileWriter(f, schema);
// construct the row batch
RowBatch rowBatch = fileSchema.createRowBatch("device_1");
RowBatch rowBatch = schema.createRowBatch("device_1");
long[] timestamps = rowBatch.timestamps;
Object[] values = rowBatch.values;
......
......@@ -7,9 +7,9 @@
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
......@@ -18,53 +18,63 @@
under the License.
-->
# Chapter 7: TsFile
# Chapter 8: TsFile
## TsFile Hierarchy
Here is a brief introduction to the structure of a TsFile.
## Variable Storage
* **Big Endian**
* For Example, the `int` `0x8` will be stored as `00 00 00 08`, not `08 00 00 00`
* **String with Variable Length**
* The format is `int size` plus `String literal`. Size can be zero.
* Size equals the number of bytes this string will take, and it may not equal to the length of the string.
* For example "sensor_1" will be stored as `00 00 00 08` plus the encoding(ASCII) of "sensor_1".
* Note that for the "Magic String"(file signature) "TsFilev0.8.0", the size(12) and encoding(ASCII)
- **Big Endian**
- For Example, the `int` `0x8` will be stored as `00 00 00 08`, not `08 00 00 00`
- **String with Variable Length**
- The format is `int size` plus `String literal`. Size can be zero.
- Size equals the number of bytes this string will take, and it may not equal the length of the string (see the encoding sketch after this list).
- For example "sensor_1" will be stored as `00 00 00 08` plus the encoding(ASCII) of "sensor_1".
- Note that for the "Magic String"(file signature) "TsFilev0.8.0", the size(12) and encoding(ASCII)
are fixed, so there is no need to put the size before this string literal.
* **Data Type Hardcode**
* 0: BOOLEAN
* 1: INT32 (`int`)
* 2: INT64 (`long`)
* 3: FLOAT
* 4: DOUBLE
* 5: TEXT (`String`)
* **Encoding Type Hardcode**
* 0: PLAIN
* 1: PLAIN_DICTIONARY
* 2: RLE
* 3: DIFF
* 4: TS_2DIFF
* 5: BITMAP
* 6: GORILLA
* 7: REGULAR
* **Compressing Type Hardcode**
* 0: UNCOMPRESSED
* 1: SNAPPY
- **Data Type Hardcode**
- 0: BOOLEAN
- 1: INT32 (`int`)
- 2: INT64 (`long`)
- 3: FLOAT
- 4: DOUBLE
- 5: TEXT (`String`)
- **Encoding Type Hardcode**
- 0: PLAIN
- 1: PLAIN_DICTIONARY
- 2: RLE
- 3: DIFF
- 4: TS_2DIFF
- 5: BITMAP
- 6: GORILLA
- 7: REGULAR
- **Compressing Type Hardcode**
- 0: UNCOMPRESSED
- 1: SNAPPY
- 2: GZIP
- 3: LZO
- 4: SDT
- 5: PAA
- 6: PLA
- **TsDigest Statistics Type Hardcode**
- 0: min_value
- 1: max_value
- 2: first_value
- 3: last_value
- 4: sum_value
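To make the variable-length string layout above concrete, here is a minimal sketch (plain Java using only `java.nio`; the helper name `writeVarString` is made up for illustration) that writes "sensor_1" as a big-endian size followed by its ASCII bytes:
```
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class VarStringSketch {

  // Hypothetical helper: big-endian int size, then the ASCII bytes of the literal.
  static byte[] writeVarString(String s) {
    byte[] literal = s.getBytes(StandardCharsets.US_ASCII);
    ByteBuffer buffer = ByteBuffer.allocate(4 + literal.length); // ByteBuffer is big-endian by default
    buffer.putInt(literal.length); // 00 00 00 08 for "sensor_1"
    buffer.put(literal);
    return buffer.array();
  }

  public static void main(String[] args) {
    for (byte b : writeVarString("sensor_1")) {
      System.out.printf("%02X ", b); // 00 00 00 08 73 65 6E 73 6F 72 5F 31
    }
  }
}
```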
## TsFile Overview
Here is a diagram of the TsFile structure.
![TsFile Breakdown](https://user-images.githubusercontent.com/40447846/61616997-6fad1300-ac9c-11e9-9c17-46785ebfbc88.png)
## Magic String
There is a 12-byte magic string:
`TsFilev0.8.0`
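As a rough illustration only (a sketch, not the project's reader API), checking the head magic string of a candidate file could look like this:
```
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

public class MagicStringCheck {

  // Returns true if the file starts with the 12-byte magic string described above.
  public static boolean startsWithMagicString(String path) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(path, "r")) {
      byte[] head = new byte[12];
      if (file.read(head) != 12) {
        return false; // too short to be a TsFile
      }
      return "TsFilev0.8.0".equals(new String(head, StandardCharsets.US_ASCII));
    }
  }
}
```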
......@@ -87,6 +97,7 @@ The `ChunkGroup` has an array of `Chunk`, a following byte `0x00` as the marker,
A `Chunk` represents a *sensor*. There is a byte `0x01` as the marker, followed by a `ChunkHeader` and an array of `Page`.
###### ChunkHeader
<center>
<table style="text-align:center">
<tr><th>Member Description</th><th>Member Type</th></tr>
......@@ -136,6 +147,7 @@ PageHeader Structure
## Metadata
### TsDeviceMetaData
The first part of metadata is `TsDeviceMetaData`
<center>
......@@ -148,6 +160,7 @@ The first part of metadata is `TsDeviceMetaData`
</center>
Then there is an array of `ChunkGroupMetaData` after `TsDeviceMetaData`
### ChunkGroupMetaData
<center>
......@@ -179,14 +192,57 @@ Then there is an array of `ChunkMetadata` for each `ChunkGroupMetadata`
</table>
</center>
###### TsDigest
###### TsDigest (updated on 2019/8/27)
Right now there are five statistics: `min_value, max_value, first_value, last_value, sum_value`.
In v0.8.0, the storage format of statistics is a name-value pair. That is, `Map<String, ByteBuffer> statistics`. The name is a string (remember the length is before the literal). But for the value, there is also an integer byteLength acting as the self-description length of the following value, because the value may be of various types. For example, if the `min_value` is an integer 0, then it will be stored as [9 "min_value" 4 0] in the TsFile.
The figure below shows an example of `TsDigest.deserializeFrom(buffer)`. In v0.8.0, we will get
There are five statistics: `min, last, sum, first, max`
```
Map<String, ByteBuffer> statistics = {
"min_value" -> ByteBuffer of int value 0,
"last" -> ByteBuffer of int value 19,
"sum" -> ByteBuffer of double value 1093347116,
"first" -> ByteBuffer of int value 0,
"max_value" -> ByteBuffer of int value 99
}
```
The storage format is a name-value pair. The name is a string (remember the length is before the literal).
![TsDigest ByteBuffer Breakdown comparison](https://user-images.githubusercontent.com/33376433/63765352-664a4280-c8fb-11e9-869e-859edf6d00bb.png)
But for the value, there is also a size integer before the data even if it is not string. For example, if the `min` is 3, then it will be
stored as 3 "min" 4 3 in the TsFile.
In v0.9.0, the storage format is changed to an array for space and time efficiency. That is, `ByteBuffer[] statistics`. Each position of the array has a fixed association with a specific type of statistic, following the order defined in StatisticType:
```
enum StatisticType {
min_value, max_value, first_value, last_value, sum_value
}
```
Therefore, in the above example, we will get
```
ByteBuffer[] statistics = [
ByteBuffer of int value 0, // associated with "min_value"
ByteBuffer of int value 99, // associated with "max_value"
ByteBuffer of int value 0, // associated with "first_value"
ByteBuffer of int value 19, // associated with "last_value"
ByteBuffer of double value 1093347116 // associated with "sum_value"
]
```
As another example in v0.9.0, when deserializing a TsDigest from buffer [3, 0,4,0, 1,4,99, 3,4,19], we get
```
ByteBuffer[] statistics = [
ByteBuffer of int value 0, // associated with "min_value"
ByteBuffer of int value 99, // associated with "max_value"
null, // associated with "first_value"
ByteBuffer of int value 19, // associated with "last_value"
null // associated with "sum_value"
]
```
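A rough reading of that byte layout (an illustrative sketch, not the actual `TsDigest.deserializeFrom` implementation; it assumes the serialized form is a count followed by `(type index, value length, value bytes)` triples, with the count, index, and length written as ints, as in the example above):
```
import java.nio.ByteBuffer;

public class TsDigestSketch {

  // Order as defined by StatisticType: min_value, max_value, first_value, last_value, sum_value
  private static final int STATISTICS_COUNT = 5;

  static ByteBuffer[] deserialize(ByteBuffer buffer) {
    ByteBuffer[] statistics = new ByteBuffer[STATISTICS_COUNT]; // absent statistics stay null
    int present = buffer.getInt();            // e.g. 3 in the example above
    for (int i = 0; i < present; i++) {
      int typeIndex = buffer.getInt();        // position in the StatisticType order
      int valueLength = buffer.getInt();      // self-describing length of the value
      byte[] value = new byte[valueLength];
      buffer.get(value);
      statistics[typeIndex] = ByteBuffer.wrap(value);
    }
    return statistics;
  }
}
```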
#### File Metadata
......@@ -207,6 +263,7 @@ After the array of `ChunkGroupMetadata`, here is the last part of the metadata.
</center>
##### DeviceIndexMetadata
<center>
<table style="text-align:center">
<tr><th>Member Description</th><th>Member Type</th></tr>
......@@ -219,6 +276,7 @@ After the array of `ChunkGroupMetadata`, here is the last part of the metadata.
</center>
##### MeasurementSchema
<center>
<table style="text-align:center">
<tr><th>Member Description</th><th>Member Type</th></tr>
......
......@@ -19,7 +19,7 @@
-->
# Chapter 8: System Tools
# Chapter 9: System Tools
## Data Import
......
......@@ -44,12 +44,12 @@ the port number of the server running, set the specific IP and PORT at -h and -p
The Linux and MacOS system startup commands are as follows:
```
Shell > ./sbin/start-client.sh -h 127.0.0.1 -p 6667 -u root -pw root
Shell > ./sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root
```
The Windows system startup commands are as follows:
```
Shell > \sbin\start-client.bat -h 127.0.0.1 -p 6667 -u root -pw root
Shell > \sbin\start-cli.bat -h 127.0.0.1 -p 6667 -u root -pw root
```
After using these commands, the client can be started successfully. The successful status will be as follows:
......@@ -86,12 +86,12 @@ Following is a client command which connects the host with IP
The Linux and MacOS system startup commands are as follows:
```
Shell > ./sbin/start-client.sh -h 10.129.187.21 -p 6667 -u root -pw root -disableIS08601 -maxPRC 10
Shell > ./sbin/start-cli.sh -h 10.129.187.21 -p 6667 -u root -pw root -disableIS08601 -maxPRC 10
```
The Windows system startup commands are as follows:
```
Shell > \sbin\start-client.bat -h 10.129.187.21 -p 6667 -u root -pw root -disableIS08601 -maxPRC 10
Shell > \sbin\start-cli.bat -h 10.129.187.21 -p 6667 -u root -pw root -disableIS08601 -maxPRC 10
```
## Cli/shell tool with -e parameter
......@@ -102,7 +102,7 @@ In order to avoid confusion between statements and other parameters, the current
The usage of -e parameter for Cli/shell is as follows:
```
Shell > ./sbin/start-client.sh -h {host} -p {port} -u {user} -pw {password} -e {sql for iotdb}
Shell > ./sbin/start-cli.sh -h {host} -p {port} -u {user} -pw {password} -e {sql for iotdb}
```
In order to better explain the use of the -e parameter, take the following as an example.
......@@ -117,12 +117,12 @@ port=6667
user=root
pass=root
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "set storage group to root.demo"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "create timeseries root.demo.s1 WITH DATATYPE=INT32, ENCODING=RLE"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(1,10)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(2,11)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(3,12)"
./sbin/start-client.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "select s1 from root.demo"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "set storage group to root.demo"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "create timeseries root.demo.s1 WITH DATATYPE=INT32, ENCODING=RLE"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(1,10)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(2,11)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "insert into root.demo(timestamp,s1) values(3,12)"
./sbin/start-cli.sh -h ${host} -p ${port} -u ${user} -pw ${pass} -e "select s1 from root.demo"
```
The printed results are shown in the figure and are consistent with the results of the client and JDBC operations.
......
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>iotdb-examples</artifactId>
<groupId>org.apache.iotdb</groupId>
<version>0.9.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>jdbc-example</artifactId>
<name>jdbc-example</name>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-jdbc</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.jdbc.demo;
package org.apache.iotdb;
import java.sql.Connection;
import java.sql.DriverManager;
......@@ -25,31 +25,24 @@ import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
public class StatementDemo {
public class JDBCExample {
public static void main(String[] args) throws ClassNotFoundException, SQLException {
Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
Connection connection = null;
try {
connection = DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement();
statement.execute("SET STORAGE GROUP TO root.ln.wf01.wt01");
statement.execute(
"CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN");
statement.execute(
"CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=FLOAT, ENCODING=RLE");
statement
.execute("insert into root.ln.wf01.wt01(timestamp,status) values(1509465600000,true)");
statement
.execute("insert into root.ln.wf01.wt01(timestamp,status) values(1509465660000,true)");
statement
.execute("insert into root.ln.wf01.wt01(timestamp,status) values(1509465720000,false)");
statement.execute(
"insert into root.ln.wf01.wt01(timestamp,temperature) values(1509465600000,25.957603)");
statement.execute(
"insert into root.ln.wf01.wt01(timestamp,temperature) values(1509465660000,24.359503)");
statement.execute(
"insert into root.ln.wf01.wt01(timestamp,temperature) values(1509465720000,20.092794)");
try (Connection connection = DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
statement.execute("SET STORAGE GROUP TO root.sg1");
statement.execute("CREATE TIMESERIES root.sg1.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE");
statement.execute("CREATE TIMESERIES root.sg1.d1.s2 WITH DATATYPE=INT64, ENCODING=RLE");
statement.execute("CREATE TIMESERIES root.sg1.d1.s3 WITH DATATYPE=INT64, ENCODING=RLE");
for (int i = 0; i < 10; i++) {
for (int j = 0 ; j < 10; j++) {
statement.addBatch("insert into root.sg1.d1(timestamp, s1, s2, s3) values("+ (i * 10 + j) + "," + 1 + "," + 1 + "," + 1 + ")");
}
statement.executeBatch();
statement.clearBatch();
}
ResultSet resultSet = statement.executeQuery("select * from root");
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
while (resultSet.next()) {
......@@ -57,12 +50,7 @@ public class StatementDemo {
for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) {
builder.append(resultSet.getString(i)).append(",");
}
System.out.println(builder);
}
statement.close();
} finally {
connection.close();
}
}
}
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.jdbc.demo;
package org.apache.iotdb;
import java.sql.Connection;
import java.sql.DriverManager;
......@@ -30,12 +30,12 @@ public class PrepareStatementDemo {
public static void main(String[] args) throws ClassNotFoundException, SQLException {
Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
Connection connection = null;
try {
connection = DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
PreparedStatement preparedStatement = connection
.prepareStatement(
"insert into root.ln.wf01.wt01(timestamp,status,temperature) values(?,?,?)");
try (Connection connection = DriverManager
.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
PreparedStatement preparedStatement = connection
.prepareStatement(
"insert into root.ln.wf01.wt01(timestamp,status,temperature) values(?,?,?)")) {
preparedStatement.setLong(1, 1509465600000L);
preparedStatement.setBoolean(2, true);
preparedStatement.setFloat(3, 25.957603f);
......@@ -60,24 +60,22 @@ public class PrepareStatementDemo {
preparedStatement.execute();
preparedStatement.clearParameters();
preparedStatement.close();
ResultSet resultSet = preparedStatement.executeQuery("select * from root");
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
while (resultSet.next()) {
StringBuilder builder = new StringBuilder();
try(ResultSet resultSet = preparedStatement.executeQuery("select * from root")) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
while (resultSet.next()) {
StringBuilder builder = new StringBuilder();
for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) {
builder.append(resultSet.getString(i)).append(",");
}
System.out.println(builder);
}
for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) {
builder.append(resultSet.getString(i)).append(",");
System.out
.println(resultSetMetaData.getColumnType(i) + "-" + resultSetMetaData.getColumnName(i));
}
System.out.println(builder);
}
preparedStatement.close();
for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) {
System.out
.println(resultSetMetaData.getColumnType(i) + "-" + resultSetMetaData.getColumnName(i));
}
} finally {
connection.close();
}
}
......
......@@ -36,6 +36,7 @@
<modules>
<module>kafka</module>
<module>rocketmq</module>
<module>session</module>
<module>tsfile</module>
</modules>
<build>
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.example;
package org.apache.iotdb.rocketmq;
import org.apache.iotdb.jdbc.Config;
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.example;
package org.apache.iotdb.rocketmq;
import java.sql.Connection;
import java.sql.DriverManager;
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.example;
package org.apache.iotdb.rocketmq;
import org.apache.rocketmq.client.exception.MQBrokerException;
import org.apache.rocketmq.client.exception.MQClientException;
......
......@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.example;
package org.apache.iotdb.rocketmq;
/**
* @author lta
......
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>iotdb-examples</artifactId>
<groupId>org.apache.iotdb</groupId>
<version>0.9.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>client-example</artifactId>
<name>client-example</name>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-session</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb;
import org.apache.iotdb.session.IoTDBSessionException;
import org.apache.iotdb.session.Session;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.record.RowBatch;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
public class SessionExample {
public static void main(String[] args) throws IoTDBSessionException {
Session session = new Session("127.0.0.1", 6667, "root", "root");
session.open();
session.setStorageGroup("root.sg1");
session.createTimeseries("root.sg1.d1.s1", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseries("root.sg1.d1.s2", TSDataType.INT64, TSEncoding.RLE);
session.createTimeseries("root.sg1.d1.s3", TSDataType.INT64, TSEncoding.RLE);
Schema schema = new Schema();
schema.registerMeasurement(new MeasurementSchema("s1", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s2", TSDataType.INT64, TSEncoding.RLE));
schema.registerMeasurement(new MeasurementSchema("s3", TSDataType.INT64, TSEncoding.RLE));
RowBatch rowBatch = schema.createRowBatch("root.sg1.d1", 100);
long[] timestamps = rowBatch.timestamps;
Object[] values = rowBatch.values;
for (long time = 0; time < 30000; time++) {
int row = rowBatch.batchSize++;
timestamps[row] = time;
for (int i = 0; i < 3; i++) {
long[] sensor = (long[]) values[i];
sensor[row] = time;
}
if (rowBatch.batchSize == rowBatch.getMaxBatchSize()) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
}
if (rowBatch.batchSize != 0) {
session.insertBatch(rowBatch);
rowBatch.reset();
}
session.close();
}
}
\ No newline at end of file
......@@ -19,32 +19,22 @@
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-examples</artifactId>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-examples</artifactId>
<version>0.9.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>tsfile-example</artifactId>
<version>0.9.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<groupId>org.apache.iotdb</groupId>
<artifactId>tsfile-example</artifactId>
<version>0.9.0-SNAPSHOT</version>
<name>tsfile-example</name>
<!-- FIXME change it to the project's website -->
<url>http://www.example.com</url>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>tsfile</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
<name>tsfile-example</name>
<dependencies>
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>tsfile</artifactId>
<version>0.9.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
......@@ -81,4 +81,4 @@ The example is to show how to write and read a TsFile File.
This class is to show the structure of a TsFile.
### Notice
 For detail, please refer to https://github.com/thulab/tsfile/wiki/Get-Started.
 For detail, please refer to https://github.com/apache/incubator-iotdb/blob/master/tsfile/README.md.
......@@ -23,9 +23,10 @@ import java.io.File;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.TsFileWriter;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.record.RowBatch;
/**
* An example of writing data with RowBatch to TsFile
*/
......@@ -39,7 +40,7 @@ public class TsFileWriteWithRowBatch {
f.delete();
}
FileSchema fileSchema = new FileSchema();
Schema schema = new Schema();
// the number of rows to include in the row batch
int rowNum = 1000000;
......@@ -48,15 +49,15 @@ public class TsFileWriteWithRowBatch {
// add measurements into file schema (all with INT64 data type)
for (int i = 0; i < sensorNum; i++) {
fileSchema.registerMeasurement(
schema.registerMeasurement(
new MeasurementSchema("sensor_" + (i + 1), TSDataType.INT64, TSEncoding.TS_2DIFF));
}
// add measurements into TSFileWriter
TsFileWriter tsFileWriter = new TsFileWriter(f, fileSchema);
TsFileWriter tsFileWriter = new TsFileWriter(f, schema);
// construct the row batch
RowBatch rowBatch = fileSchema.createRowBatch("device_1");
RowBatch rowBatch = schema.createRowBatch("device_1");
long[] timestamps = rowBatch.timestamps;
Object[] values = rowBatch.values;
......
......@@ -30,7 +30,7 @@ import org.apache.iotdb.tsfile.hadoop.io.HDFSOutputStream;
import org.apache.iotdb.tsfile.timeseries.basis.TsFile;
import org.apache.iotdb.tsfile.write.exception.InvalidJsonSchemaException;
import org.apache.iotdb.tsfile.write.exception.WriteProcessException;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -42,9 +42,9 @@ public class TSFRecordWriter extends RecordWriter<NullWritable, TSRow> {
public TSFRecordWriter(Path path, JSONObject schema) throws InterruptedException, IOException {
// construct the internal RecordWriter
FileSchema fileSchema = null;
Schema fileSchema = null;
try {
fileSchema = new FileSchema(schema);
fileSchema = new Schema(schema);
} catch (InvalidJsonSchemaException e) {
throw new InterruptedException(String.format("Construct the tsfile schema failed"), e);
}
......
......@@ -18,11 +18,6 @@
*/
package org.apache.iotdb.jdbc;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.SocketException;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
......@@ -40,21 +35,12 @@ import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.time.ZoneId;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import org.apache.iotdb.service.rpc.thrift.ServerProperties;
import org.apache.iotdb.service.rpc.thrift.TSCloseSessionReq;
import org.apache.iotdb.service.rpc.thrift.TSGetTimeZoneResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionReq;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionResp;
import org.apache.iotdb.service.rpc.thrift.TSProtocolVersion;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneReq;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneResp;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.rpc.IoTDBRPCException;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
......@@ -65,8 +51,8 @@ import org.slf4j.LoggerFactory;
public class IoTDBConnection implements Connection {
Logger logger = LoggerFactory.getLogger(IoTDBConnection.class);
private final List<TSProtocolVersion> supportedProtocols = new LinkedList<>();
private static final Logger logger = LoggerFactory.getLogger(IoTDBConnection.class);
private final TSProtocolVersion protocolVersion = TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V1;
public TSIService.Iface client = null;
public TS_SessionHandle sessionHandle = null;
private IoTDBConnectionParams params;
......@@ -87,8 +73,6 @@ public class IoTDBConnection implements Connection {
}
params = Utils.parseUrl(url, info);
supportedProtocols.add(TSProtocolVersion.TSFILE_SERVICE_PROTOCOL_V1);
openTransport();
if(Config.rpcThriftCompressionEnable) {
client = new TSIService.Client(new TCompactProtocol(transport));
......@@ -99,15 +83,10 @@ public class IoTDBConnection implements Connection {
// open client session
openSession();
// Wrap the client with a thread-safe proxy to serialize the RPC calls
client = newSynchronizedClient(client);
client = RpcUtils.newSynchronizedClient(client);
autoCommit = false;
}
public static TSIService.Iface newSynchronizedClient(TSIService.Iface client) {
return (TSIService.Iface) Proxy.newProxyInstance(IoTDBConnection.class.getClassLoader(),
new Class[]{TSIService.Iface.class}, new SynchronizedHandler(client));
}
@Override
public boolean isWrapperFor(Class<?> arg0) throws SQLException {
throw new SQLException("Method not supported");
......@@ -416,18 +395,13 @@ public class IoTDBConnection implements Connection {
private void openTransport() throws TTransportException {
transport = new TSocket(params.getHost(), params.getPort(), Config.connectionTimeoutInMs);
try {
transport.getSocket().setKeepAlive(true);
} catch (SocketException e) {
logger.error("Cannot set socket keep alive because: ", e);
}
if (!transport.isOpen()) {
transport.open();
}
}
private void openSession() throws SQLException {
TSOpenSessionReq openReq = new TSOpenSessionReq(TSProtocolVersion.TSFILE_SERVICE_PROTOCOL_V1);
TSOpenSessionReq openReq = new TSOpenSessionReq(TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V1);
openReq.setUsername(params.getUsername());
openReq.setPassword(params.getPassword());
......@@ -437,14 +411,16 @@ public class IoTDBConnection implements Connection {
// validate connection
try {
Utils.verifySuccess(openResp.getStatus());
} catch (IoTDBSQLException e) {
RpcUtils.verifySuccess(openResp.getStatus());
} catch (IoTDBRPCException e) {
// failed to connect, disconnect from the server
transport.close();
throw e;
throw new IoTDBSQLException(e.getMessage());
}
if (!supportedProtocols.contains(openResp.getServerProtocolVersion())) {
throw new TException("Unsupported TsFile protocol");
if (protocolVersion.getValue() != openResp.getServerProtocolVersion().getValue()) {
throw new TException(String
.format("Protocol not supported, Client version is {}, but Server version is {}",
protocolVersion.getValue(), openResp.getServerProtocolVersion().getValue()));
}
setProtocol(openResp.getServerProtocolVersion());
sessionHandle = openResp.getSessionHandle();
......@@ -476,7 +452,7 @@ public class IoTDBConnection implements Connection {
client = new TSIService.Client(new TBinaryProtocol(transport));
}
openSession();
client = newSynchronizedClient(client);
client = RpcUtils.newSynchronizedClient(client);
flag = true;
break;
}
......@@ -497,14 +473,22 @@ public class IoTDBConnection implements Connection {
}
TSGetTimeZoneResp resp = client.getTimeZone();
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
return resp.getTimeZone();
}
public void setTimeZone(String zoneId) throws TException, IoTDBSQLException {
TSSetTimeZoneReq req = new TSSetTimeZoneReq(zoneId);
TSSetTimeZoneResp resp = client.setTimeZone(req);
Utils.verifySuccess(resp.getStatus());
TSRPCResp resp = client.setTimeZone(req);
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
this.zoneId = ZoneId.of(zoneId);
}
......@@ -520,32 +504,4 @@ public class IoTDBConnection implements Connection {
this.protocol = protocol;
}
private static class SynchronizedHandler implements InvocationHandler {
private final TSIService.Iface client;
SynchronizedHandler(TSIService.Iface client) {
this.client = client;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
try {
synchronized (client) {
return method.invoke(client, args);
}
} catch (InvocationTargetException e) {
// all IFace APIs throw TException
if (e.getTargetException() instanceof TException) {
throw e.getTargetException();
} else {
// should not happen
throw new TException("Error in calling method " + method.getName(),
e.getTargetException());
}
} catch (Exception e) {
throw new TException("Error in calling method " + method.getName(), e);
}
}
}
}
......@@ -25,6 +25,8 @@ import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.util.List;
import java.util.Set;
import org.apache.iotdb.rpc.IoTDBRPCException;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
......@@ -82,7 +84,11 @@ public class IoTDBDatabaseMetadata implements DatabaseMetaData {
req.setColumnPath(schemaPattern);
try {
TSFetchMetadataResp resp = client.fetchMetadata(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
return new IoTDBMetadataResultSet(resp.getColumnsList(), null, null);
} catch (TException e) {
throw new TException("Conncetion error when fetching column metadata", e);
......@@ -92,7 +98,11 @@ public class IoTDBDatabaseMetadata implements DatabaseMetaData {
req.setColumnPath(schemaPattern);
try {
TSFetchMetadataResp resp = client.fetchMetadata(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
return new IoTDBMetadataResultSet(resp.getColumnsList(), null, null);
} catch (TException e) {
throw new TException("Conncetion error when fetching delta object metadata", e);
......@@ -101,7 +111,11 @@ public class IoTDBDatabaseMetadata implements DatabaseMetaData {
req = new TSFetchMetadataReq(Constant.GLOBAL_SHOW_STORAGE_GROUP_REQ);
try {
TSFetchMetadataResp resp = client.fetchMetadata(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
Set<String> showStorageGroup = resp.getShowStorageGroups();
return new IoTDBMetadataResultSet(null, showStorageGroup, null);
} catch (TException e) {
......@@ -112,7 +126,11 @@ public class IoTDBDatabaseMetadata implements DatabaseMetaData {
req.setColumnPath(schemaPattern);
try {
TSFetchMetadataResp resp = client.fetchMetadata(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
List<List<String>> showTimeseriesList = resp.getShowTimeseriesList();
return new IoTDBMetadataResultSet(null, null, showTimeseriesList);
} catch (TException e) {
......@@ -1247,7 +1265,11 @@ public class IoTDBDatabaseMetadata implements DatabaseMetaData {
TSFetchMetadataReq req = new TSFetchMetadataReq("METADATA_IN_JSON");
TSFetchMetadataResp resp;
resp = client.fetchMetadata(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
return resp.getMetadataInJson();
}
}
......@@ -22,11 +22,12 @@ package org.apache.iotdb.jdbc;
import java.sql.SQLException;
import java.time.ZoneId;
import java.util.List;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSIService.Iface;
import org.apache.iotdb.service.rpc.thrift.TSInsertionReq;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.thrift.TException;
public class IoTDBPreparedInsertionStatement extends IoTDBPreparedStatement {
......@@ -44,12 +45,12 @@ public class IoTDBPreparedInsertionStatement extends IoTDBPreparedStatement {
public boolean execute() throws SQLException {
try {
TSExecuteStatementResp resp = client.executeInsertion(req);
TSExecuteStatementResp resp = client.insert(req);
req.unsetDeviceId();
req.unsetMeasurements();
req.unsetTimestamp();
req.unsetValues();
return resp.getStatus().getStatusCode() == TS_StatusCode.SUCCESS_STATUS;
return resp.getStatus().getStatusType().getCode() == TSStatusType.SUCCESS_STATUS.getStatusCode();
} catch (TException e) {
throw new SQLException(e);
}
......
......@@ -48,6 +48,7 @@ import java.util.List;
import java.util.Map;
import org.apache.iotdb.service.rpc.thrift.TSIService.Iface;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.thrift.TException;
public class IoTDBPreparedStatement extends IoTDBStatement implements PreparedStatement {
......
......@@ -46,13 +46,9 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationResp;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TSOperationHandle;
import org.apache.iotdb.service.rpc.thrift.TSQueryDataSet;
import org.apache.iotdb.rpc.IoTDBRPCException;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.iotdb.tsfile.read.common.Field;
import org.apache.iotdb.tsfile.read.common.RowRecord;
import org.apache.thrift.TException;
......@@ -213,10 +209,10 @@ public class IoTDBQueryResultSet implements ResultSet {
try {
if (operationHandle != null) {
TSCloseOperationReq closeReq = new TSCloseOperationReq(operationHandle, queryId);
TSCloseOperationResp closeResp = client.closeOperation(closeReq);
Utils.verifySuccess(closeResp.getStatus());
TSRPCResp closeResp = client.closeOperation(closeReq);
RpcUtils.verifySuccess(closeResp.getStatus());
}
} catch (SQLException e) {
} catch (IoTDBRPCException e) {
throw new SQLException("Error occurs for close opeation in server side becasuse " + e);
} catch (TException e) {
throw new SQLException(
......@@ -708,7 +704,11 @@ public class IoTDBQueryResultSet implements ResultSet {
try {
TSFetchResultsResp resp = client.fetchResults(req);
Utils.verifySuccess(resp.getStatus());
try {
RpcUtils.verifySuccess(resp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
if (!resp.hasResultSet) {
emptyResultSet = true;
} else {
......
......@@ -29,4 +29,8 @@ public class IoTDBSQLException extends SQLException {
super(reason);
}
public IoTDBSQLException(Throwable cause) {
super(cause);
}
}
......@@ -30,18 +30,11 @@ import java.time.ZoneId;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.iotdb.service.rpc.thrift.TSCancelOperationReq;
import org.apache.iotdb.service.rpc.thrift.TSCancelOperationResp;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationResp;
import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementReq;
import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementReq;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TSOperationHandle;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.IoTDBRPCException;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.thrift.TException;
public class IoTDBStatement implements Statement {
......@@ -49,6 +42,7 @@ public class IoTDBStatement implements Statement {
private static final String SHOW_TIMESERIES_COMMAND_LOWERCASE = "show timeseries";
private static final String SHOW_STORAGE_GROUP_COMMAND_LOWERCASE = "show storage group";
private static final String METHOD_NOT_SUPPORTED_STRING = "Method not supported";
ZoneId zoneId;
private ResultSet resultSet = null;
private IoTDBConnection connection;
......@@ -130,8 +124,8 @@ public class IoTDBStatement implements Statement {
try {
if (operationHandle != null) {
TSCancelOperationReq closeReq = new TSCancelOperationReq(operationHandle);
TSCancelOperationResp closeResp = client.cancelOperation(closeReq);
Utils.verifySuccess(closeResp.getStatus());
TSRPCResp closeResp = client.cancelOperation(closeReq);
RpcUtils.verifySuccess(closeResp.getStatus());
}
} catch (Exception e) {
throw new SQLException("Error occurs when canceling statement.", e);
......@@ -157,8 +151,8 @@ public class IoTDBStatement implements Statement {
if (operationHandle != null) {
TSCloseOperationReq closeReq = new TSCloseOperationReq(operationHandle, -1);
closeReq.setStmtId(stmtId);
TSCloseOperationResp closeResp = client.closeOperation(closeReq);
Utils.verifySuccess(closeResp.getStatus());
TSRPCResp closeResp = client.closeOperation(closeReq);
RpcUtils.verifySuccess(closeResp.getStatus());
}
} catch (Exception e) {
throw new SQLException("Error occurs when closing statement.", e);
......@@ -246,7 +240,11 @@ public class IoTDBStatement implements Statement {
TSExecuteStatementReq execReq = new TSExecuteStatementReq(sessionHandle, sql);
TSExecuteStatementResp execResp = client.executeStatement(execReq);
operationHandle = execResp.getOperationHandle();
Utils.verifySuccess(execResp.getStatus());
try {
RpcUtils.verifySuccess(execResp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
if (execResp.getOperationHandle().hasResultSet) {
IoTDBQueryResultSet resSet = new IoTDBQueryResultSet(this,
execResp.getColumns(), client,
......@@ -288,7 +286,7 @@ public class IoTDBStatement implements Statement {
TSExecuteBatchStatementReq execReq = new TSExecuteBatchStatementReq(sessionHandle,
batchSQLList);
TSExecuteBatchStatementResp execResp = client.executeBatchStatement(execReq);
if (execResp.getStatus().statusCode == TS_StatusCode.SUCCESS_STATUS) {
if (execResp.getStatus().getStatusType().getCode() == TSStatusType.SUCCESS_STATUS.getStatusCode()) {
if (execResp.getResult() == null) {
return new int[0];
} else {
......@@ -303,7 +301,7 @@ public class IoTDBStatement implements Statement {
} else {
BatchUpdateException exception;
if (execResp.getResult() == null) {
exception = new BatchUpdateException(execResp.getStatus().errorMessage, new int[0]);
exception = new BatchUpdateException(execResp.getStatus().getStatusType().getMessage(), new int[0]);
} else {
List<Integer> result = execResp.getResult();
int len = result.size();
......@@ -311,7 +309,7 @@ public class IoTDBStatement implements Statement {
for (int i = 0; i < len; i++) {
updateArray[i] = result.get(i);
}
exception = new BatchUpdateException(execResp.getStatus().errorMessage, updateArray);
exception = new BatchUpdateException(execResp.getStatus().getStatusType().getMessage(), updateArray);
}
throw exception;
}
......@@ -346,7 +344,11 @@ public class IoTDBStatement implements Statement {
TSExecuteStatementReq execReq = new TSExecuteStatementReq(sessionHandle, sql);
TSExecuteStatementResp execResp = client.executeQueryStatement(execReq);
operationHandle = execResp.getOperationHandle();
Utils.verifySuccess(execResp.getStatus());
try {
RpcUtils.verifySuccess(execResp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
IoTDBQueryResultSet resSet = new IoTDBQueryResultSet(this, execResp.getColumns(), client,
operationHandle, sql, execResp.getOperationType(), execResp.getDataTypeList(),
queryId.getAndIncrement());
......@@ -399,7 +401,11 @@ public class IoTDBStatement implements Statement {
TSExecuteStatementReq execReq = new TSExecuteStatementReq(sessionHandle, sql);
TSExecuteStatementResp execResp = client.executeUpdateStatement(execReq);
operationHandle = execResp.getOperationHandle();
Utils.verifySuccess(execResp.getStatus());
try {
RpcUtils.verifySuccess(execResp.getStatus());
} catch (IoTDBRPCException e) {
throw new IoTDBSQLException(e.getMessage());
}
return 0;
}
......
......@@ -26,8 +26,6 @@ import java.util.regex.Pattern;
import org.apache.iotdb.service.rpc.thrift.TSDataValue;
import org.apache.iotdb.service.rpc.thrift.TSQueryDataSet;
import org.apache.iotdb.service.rpc.thrift.TSRowRecord;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.read.common.Field;
......@@ -39,18 +37,11 @@ import org.apache.iotdb.tsfile.utils.Binary;
*/
public class Utils {
/**
* Private constructor of Utils Class.
*/
private Utils(){
throw new IllegalAccessError("Utility class");
}
/**
* Parse JDBC connection URL The only supported format of the URL is:
* jdbc:iotdb://localhost:6667/.
*/
public static IoTDBConnectionParams parseUrl(String url, Properties info)
static IoTDBConnectionParams parseUrl(String url, Properties info)
throws IoTDBURLException {
IoTDBConnectionParams params = new IoTDBConnectionParams(url);
if (url.trim().equalsIgnoreCase(Config.IOTDB_URL_PREFIX)) {
......@@ -79,24 +70,13 @@ public class Utils {
return params;
}
/**
* verify success.
*
* @param status -status
*/
public static void verifySuccess(TS_Status status) throws IoTDBSQLException {
if (status.getStatusCode() != TS_StatusCode.SUCCESS_STATUS) {
throw new IoTDBSQLException(status.errorMessage);
}
}
/**
* convert row records.
*
* @param tsQueryDataSet -query data set
* @return -list of row record
*/
public static List<RowRecord> convertRowRecords(TSQueryDataSet tsQueryDataSet) {
static List<RowRecord> convertRowRecords(TSQueryDataSet tsQueryDataSet) {
List<RowRecord> records = new ArrayList<>();
for (TSRowRecord ts : tsQueryDataSet.getRecords()) {
RowRecord r = new RowRecord(ts.getTimestamp());
......@@ -151,4 +131,5 @@ public class Utils {
dataType));
}
}
}
......@@ -29,12 +29,9 @@ import java.sql.Statement;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.List;
import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementReq;
import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Before;
......@@ -50,8 +47,10 @@ public class BatchTest {
private TSIService.Iface client;
@Mock
private TS_SessionHandle sessHandle;
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_Status Status_ERROR = new TS_Status(TS_StatusCode.ERROR_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_StatusType errorStatus = new TS_StatusType(TSStatusType.INTERNAL_SERVER_ERROR.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
private TS_Status Status_ERROR = new TS_Status(errorStatus);
private TSExecuteBatchStatementResp resp;
private ZoneId zoneID = ZoneId.systemDefault();
......
......@@ -24,13 +24,9 @@ import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import org.apache.iotdb.service.rpc.thrift.ServerProperties;
import org.apache.iotdb.service.rpc.thrift.TSGetTimeZoneResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneReq;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneResp;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Before;
......@@ -44,7 +40,8 @@ public class IoTDBConnectionTest {
private TSIService.Iface client;
private IoTDBConnection connection = new IoTDBConnection();
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
@Before
public void setUp() throws Exception {
......@@ -59,7 +56,7 @@ public class IoTDBConnectionTest {
public void testSetTimeZone() throws TException, IoTDBSQLException {
String timeZone = "Asia/Shanghai";
when(client.setTimeZone(any(TSSetTimeZoneReq.class)))
.thenReturn(new TSSetTimeZoneResp(Status_SUCCESS));
.thenReturn(new TSRPCResp(Status_SUCCESS));
connection.client = client;
connection.setTimeZone(timeZone);
assertEquals(connection.getTimeZone(), timeZone);
......
......@@ -30,11 +30,9 @@ import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
......@@ -66,7 +64,8 @@ public class IoTDBDatabaseMetadataTest {
@Mock
private TSFetchMetadataResp fetchMetadataResp;
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
private DatabaseMetaData databaseMetaData;
......@@ -96,8 +95,7 @@ public class IoTDBDatabaseMetadataTest {
String standard =
"Column,\n" + "root.vehicle.d0.s0,\n" + "root.vehicle.d0.s1,\n" + "root.vehicle.d0.s2,\n";
try {
ResultSet resultSet = databaseMetaData.getColumns(Constant.CATALOG_COLUMN, "root", null, null);
try (ResultSet resultSet = databaseMetaData.getColumns(Constant.CATALOG_COLUMN, "root", null, null)) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......@@ -129,9 +127,8 @@ public class IoTDBDatabaseMetadataTest {
when(fetchMetadataResp.getColumnsList()).thenReturn(columnList);
String standard = "Column,\n" + "root.vehicle.d0,\n";
try {
ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_DEVICE, "vehicle", null, null);
try (ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_DEVICE, "vehicle", null, null)) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......@@ -189,9 +186,8 @@ public class IoTDBDatabaseMetadataTest {
+ "root.vehicle.d0.s0,root.vehicle,INT32,RLE,\n"
+ "root.vehicle.d0.s1,root.vehicle,INT64,RLE,\n"
+ "root.vehicle.d0.s2,root.vehicle,FLOAT,RLE,\n";
try {
ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_TIMESERIES, "root", null, null);
try (ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_TIMESERIES, "root", null, null);) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......@@ -230,10 +226,9 @@ public class IoTDBDatabaseMetadataTest {
when(fetchMetadataResp.getShowTimeseriesList()).thenReturn(tslist);
String standard = "DataType,\n" + "INT32,\n";
try {
ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_TIMESERIES, "root.vehicle.d0.s0", null,
null);
try (ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_TIMESERIES, "root.vehicle.d0.s0", null,
null)) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
StringBuilder resultStr = new StringBuilder();
resultStr.append(resultSetMetaData.getColumnName(3)).append(",\n");
......@@ -259,9 +254,8 @@ public class IoTDBDatabaseMetadataTest {
when(fetchMetadataResp.getShowStorageGroups()).thenReturn(sgSet);
String standard = "Storage Group,\n" + "root.vehicle,\n";
try {
ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_STORAGE_GROUP, null, null, null);
try (ResultSet resultSet = databaseMetaData
.getColumns(Constant.CATALOG_STORAGE_GROUP, null, null, null)) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......
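
The recurring change in the JDBC test hunks above is the switch from a bare `ResultSet` to try-with-resources. As a minimal, self-contained sketch of that pattern (the statement and query below are illustrative placeholders, not part of this commit):

```java
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ResultSetPatternSketch {
  // Print the first column of every row; the ResultSet is closed automatically
  // when the try block exits, which also releases the server-side handle.
  static void printFirstColumn(Statement statement, String query) throws SQLException {
    try (ResultSet resultSet = statement.executeQuery(query)) {
      while (resultSet.next()) {
        System.out.println(resultSet.getString(1));
      }
    }
  }
}
```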
......@@ -26,14 +26,10 @@ import static org.mockito.Mockito.when;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.ZoneId;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementReq;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSGetOperationStatusResp;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.iotdb.service.rpc.thrift.TSIService.Iface;
import org.apache.iotdb.service.rpc.thrift.TSOperationHandle;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
......@@ -45,7 +41,7 @@ public class IoTDBPreparedStatementTest {
@Mock
TSExecuteStatementResp execStatementResp;
@Mock
TSGetOperationStatusResp getOperationStatusResp;
TSRPCResp getOperationStatusResp;
private ZoneId zoneId = ZoneId.systemDefault();
@Mock
private IoTDBConnection connection;
......@@ -53,7 +49,8 @@ public class IoTDBPreparedStatementTest {
private Iface client;
@Mock
private TS_SessionHandle sessHandle;
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
@Mock
private TSOperationHandle tOperationHandle;
......
......@@ -26,25 +26,16 @@ import static org.mockito.Mockito.when;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.List;
import org.apache.iotdb.service.rpc.thrift.TSDataValue;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementReq;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataResp;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsResp;
import org.apache.iotdb.service.rpc.thrift.TSIService;
import org.apache.iotdb.service.rpc.thrift.TSOperationHandle;
import org.apache.iotdb.service.rpc.thrift.TSQueryDataSet;
import org.apache.iotdb.service.rpc.thrift.TSRowRecord;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.junit.Assert;
import org.junit.Before;
......@@ -117,7 +108,8 @@ public class IoTDBQueryResultSetTest {
@Mock
private TSFetchResultsResp fetchResultsResp;
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
private ZoneId zoneID = ZoneId.systemDefault();
@Before
......@@ -137,6 +129,10 @@ public class IoTDBQueryResultSetTest {
when(client.fetchResults(any(TSFetchResultsReq.class))).thenReturn(fetchResultsResp);
when(fetchResultsResp.getStatus()).thenReturn(Status_SUCCESS);
TSRPCResp closeResp = new TSRPCResp();
closeResp.setStatus(Status_SUCCESS);
when(client.closeOperation(any(TSCloseOperationReq.class))).thenReturn(closeResp);
}
@SuppressWarnings("resource")
......@@ -181,50 +177,51 @@ public class IoTDBQueryResultSetTest {
when(fetchResultsResp.getQueryDataSet()).thenReturn(tsQueryDataSet);
if (hasResultSet) {
ResultSet resultSet = statement.getResultSet();
// check columnInfoMap
Assert.assertEquals(resultSet.findColumn("Time"), 1);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s2"), 2);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s1"), 3);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s0"), 4);
try (ResultSet resultSet = statement.getResultSet()) {
// check columnInfoMap
Assert.assertEquals(resultSet.findColumn("Time"), 1);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s2"), 2);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s1"), 3);
Assert.assertEquals(resultSet.findColumn("root.vehicle.d0.s0"), 4);
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
// check columnInfoList
Assert.assertEquals(resultSetMetaData.getColumnName(1), "Time");
Assert.assertEquals(resultSetMetaData.getColumnName(2), "root.vehicle.d0.s2");
Assert.assertEquals(resultSetMetaData.getColumnName(3), "root.vehicle.d0.s1");
Assert.assertEquals(resultSetMetaData.getColumnName(4), "root.vehicle.d0.s0");
Assert.assertEquals(resultSetMetaData.getColumnName(5), "root.vehicle.d0.s2");
// check columnTypeList
Assert.assertEquals(resultSetMetaData.getColumnType(1), Types.TIMESTAMP);
Assert.assertEquals(resultSetMetaData.getColumnType(2), Types.FLOAT);
Assert.assertEquals(resultSetMetaData.getColumnType(3), Types.BIGINT);
Assert.assertEquals(resultSetMetaData.getColumnType(4), Types.INTEGER);
Assert.assertEquals(resultSetMetaData.getColumnType(5), Types.FLOAT);
// check fetched result
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
for (int i = 1; i < colCount + 1; i++) { // meta title
resultStr.append(resultSetMetaData.getColumnName(i)).append(",");
}
resultStr.append("\n");
while (resultSet.next()) { // data
for (int i = 1; i <= colCount; i++) {
resultStr.append(resultSet.getString(i)).append(",");
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
// check columnInfoList
Assert.assertEquals(resultSetMetaData.getColumnName(1), "Time");
Assert.assertEquals(resultSetMetaData.getColumnName(2), "root.vehicle.d0.s2");
Assert.assertEquals(resultSetMetaData.getColumnName(3), "root.vehicle.d0.s1");
Assert.assertEquals(resultSetMetaData.getColumnName(4), "root.vehicle.d0.s0");
Assert.assertEquals(resultSetMetaData.getColumnName(5), "root.vehicle.d0.s2");
// check columnTypeList
Assert.assertEquals(resultSetMetaData.getColumnType(1), Types.TIMESTAMP);
Assert.assertEquals(resultSetMetaData.getColumnType(2), Types.FLOAT);
Assert.assertEquals(resultSetMetaData.getColumnType(3), Types.BIGINT);
Assert.assertEquals(resultSetMetaData.getColumnType(4), Types.INTEGER);
Assert.assertEquals(resultSetMetaData.getColumnType(5), Types.FLOAT);
// check fetched result
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
for (int i = 1; i < colCount + 1; i++) { // meta title
resultStr.append(resultSetMetaData.getColumnName(i)).append(",");
}
resultStr.append("\n");
while (resultSet.next()) { // data
for (int i = 1; i <= colCount; i++) {
resultStr.append(resultSet.getString(i)).append(",");
}
resultStr.append("\n");
fetchResultsResp.hasResultSet = false; // at the second time to fetch
fetchResultsResp.hasResultSet = false; // at the second time to fetch
}
String standard =
"Time,root.vehicle.d0.s2,root.vehicle.d0.s1,root.vehicle.d0.s0,root.vehicle.d0.s2,\n"
+ "2,2.22,40000,null,2.22,\n" + "3,3.33,null,null,3.33,\n"
+ "4,4.44,null,null,4.44,\n"
+ "50,null,50000,null,null,\n" + "100,null,199,null,null,\n"
+ "101,null,199,null,null,\n"
+ "103,null,199,null,null,\n" + "105,11.11,199,33333,11.11,\n"
+ "1000,1000.11,55555,22222,1000.11,\n";
Assert.assertEquals(resultStr.toString(), standard);
}
String standard =
"Time,root.vehicle.d0.s2,root.vehicle.d0.s1,root.vehicle.d0.s0,root.vehicle.d0.s2,\n"
+ "2,2.22,40000,null,2.22,\n" + "3,3.33,null,null,3.33,\n"
+ "4,4.44,null,null,4.44,\n"
+ "50,null,50000,null,null,\n" + "100,null,199,null,null,\n"
+ "101,null,199,null,null,\n"
+ "103,null,199,null,null,\n" + "105,11.11,199,33333,11.11,\n"
+ "1000,1000.11,55555,22222,1000.11,\n";
Assert.assertEquals(resultStr.toString(), standard);
}
}
......@@ -276,14 +273,12 @@ public class IoTDBQueryResultSetTest {
} else {
if (i == 0) {
value.setFloat_val((float) item[3 * i + 3]);
value.setType(item[3 * i + 2].toString());
} else if (i == 1) {
value.setLong_val((long) item[3 * i + 3]);
value.setType(item[3 * i + 2].toString());
} else {
value.setInt_val((int) item[3 * i + 3]);
value.setType(item[3 * i + 2].toString());
}
value.setType(item[3 * i + 2].toString());
}
values.add(value);
}
......
......@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.when;
......@@ -30,12 +31,10 @@ import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchMetadataResp;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.iotdb.service.rpc.thrift.TSIService.Iface;
import org.apache.iotdb.service.rpc.thrift.TS_SessionHandle;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Assert;
......@@ -58,7 +57,8 @@ public class IoTDBStatementTest {
@Mock
private TSFetchMetadataResp fetchMetadataResp;
private TS_Status Status_SUCCESS = new TS_Status(TS_StatusCode.SUCCESS_STATUS);
private TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
private TS_Status Status_SUCCESS = new TS_Status(successStatus);
private ZoneId zoneID = ZoneId.systemDefault();
@Before
......@@ -116,9 +116,8 @@ public class IoTDBStatementTest {
+ "root.vehicle.d0.s2,root.vehicle,FLOAT,RLE,\n";
when(fetchMetadataResp.getShowTimeseriesList()).thenReturn(tslist);
boolean res = stmt.execute("show timeseries root");
assertEquals(res, true);
try {
ResultSet resultSet = stmt.getResultSet();
assertTrue(res);
try (ResultSet resultSet = stmt.getResultSet()) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......@@ -147,9 +146,8 @@ public class IoTDBStatementTest {
when(fetchMetadataResp.getShowStorageGroups()).thenReturn(sgSet);
String standard = "Storage Group,\nroot.vehicle,\n";
boolean res = stmt.execute("show storage group");
assertEquals(res, true);
try {
ResultSet resultSet = stmt.getResultSet();
assertTrue(res);
try (ResultSet resultSet = stmt.getResultSet()) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
int colCount = resultSetMetaData.getColumnCount();
StringBuilder resultStr = new StringBuilder();
......
......@@ -26,11 +26,15 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.TSDataValue;
import org.apache.iotdb.service.rpc.thrift.TSQueryDataSet;
import org.apache.iotdb.service.rpc.thrift.TSRowRecord;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.service.rpc.thrift.*;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.read.common.Field;
import org.apache.iotdb.tsfile.read.common.RowRecord;
......@@ -69,13 +73,15 @@ public class UtilsTest {
@Test
public void testVerifySuccess() {
try {
Utils.verifySuccess(new TS_Status(TS_StatusCode.SUCCESS_STATUS));
TS_StatusType successStatus = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
RpcUtils.verifySuccess(new TS_Status(successStatus));
} catch (Exception e) {
fail();
}
try {
Utils.verifySuccess(new TS_Status(TS_StatusCode.ERROR_STATUS));
TS_StatusType errorStatus = new TS_StatusType(TSStatusType.INTERNAL_SERVER_ERROR.getStatusCode(), "");
RpcUtils.verifySuccess(new TS_Status(errorStatus));
} catch (Exception e) {
return;
}
......@@ -114,23 +120,18 @@ public class UtilsTest {
} else {
if (i == 0) {
value.setBool_val((boolean) item[3 * i + 3]);
value.setType(((TSDataType) item[3 * i + 2]).toString());
} else if (i == 1) {
value.setInt_val((int) item[3 * i + 3]);
value.setType(((TSDataType) item[3 * i + 2]).toString());
} else if (i == 2) {
value.setLong_val((long) item[3 * i + 3]);
value.setType(((TSDataType) item[3 * i + 2]).toString());
} else if (i == 3) {
value.setFloat_val((float) item[3 * i + 3]);
value.setType(((TSDataType) item[3 * i + 2]).toString());
} else if (i == 4) {
value.setDouble_val((double) item[3 * i + 3]);
value.setType(((TSDataType) item[3 * i + 2]).toString());
} else {
value.setBinary_val(ByteBuffer.wrap(((String) item[3 * i + 3]).getBytes()));
value.setType(((TSDataType) item[3 * i + 2]).toString());
}
value.setType(item[3 * i + 2].toString());
}
values.add(value);
}
......
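
For readers unfamiliar with the new status types, here is a minimal sketch of the construction-and-verification pattern used throughout the updated tests. It relies only on classes imported in the hunks above (TSStatusType and RpcUtils from the rpc module, plus the generated TS_* Thrift types):

```java
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusType;
import org.apache.iotdb.service.rpc.thrift.TS_Status;
import org.apache.iotdb.service.rpc.thrift.TS_StatusType;

public class StatusSketch {
  // Build a success status the way the tests above do and verify it;
  // verifySuccess throws for any non-success status code.
  static void checkSuccess() throws Exception {
    TS_StatusType ok = new TS_StatusType(TSStatusType.SUCCESS_STATUS.getStatusCode(), "");
    RpcUtils.verifySuccess(new TS_Status(ok));
  }
}
```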
......@@ -44,6 +44,19 @@
<url>https://github.com/apache/incubator-iotdb</url>
<tag>rel/0.8</tag>
</scm>
<modules>
<module>tsfile</module>
<module>service-rpc</module>
<module>jdbc</module>
<module>session</module>
<module>client</module>
<module>server</module>
<module>example</module>
<module>grafana</module>
<module>spark-tsfile</module>
<!-- <module>hadoop</module> -->
<module>distribution</module>
</modules>
<!-- Properties Management -->
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
......@@ -377,18 +390,6 @@
<scope>test</scope>
</dependency>
</dependencies>
<modules>
<module>tsfile</module>
<module>service-rpc</module>
<module>jdbc</module>
<module>client</module>
<module>server</module>
<module>example</module>
<module>grafana</module>
<module>spark-tsfile</module>
<!-- <module>hadoop</module> -->
<module>distribution</module>
</modules>
<build>
<pluginManagement>
<plugins>
......
......@@ -27,6 +27,8 @@ rpc_port=6667
rpc_thrift_compression_enable=false
rpc_max_concurrent_client_num=65535
####################
### Dynamic Parameter Adapter Configuration
####################
......@@ -157,7 +159,52 @@ concurrent_flush_thread=0
# whether IoTDB takes over memory management from the JVM when serializing a memtable as bytes in memory
# (i.e., whether to use ChunkBufferPool), value: true, false
chunk_buffer_pool_enable = false
chunk_buffer_pool_enable=false
####################
### Merge Configurations
####################
# How many threads will be set up to perform main merge tasks, 1 by default.
# Set to 1 when less than or equal to 0.
merge_thread_num=1
# How many threads will be set up to perform merge chunk sub-tasks, 4 by default.
# Set to 1 when less than or equal to 0.
merge_chunk_subthread_num=4
# If one merge file selection runs for longer than this time, it will be ended and its current
# selection will be used as the final selection. Unit: millis.
# When < 0, the selection time is unbounded.
merge_fileSelection_time_budget=30000
# How much memory may be used in ONE merge task (in bytes), 20% of the maximum JVM memory by default.
# This is only a rough estimation, starting from a relatively small value to avoid OOM.
# Each merge thread may take this much memory, so merge_thread_num * merge_memory_budget is the
# estimated total memory used by merging.
# merge_memory_budget=2147483648
# When set to true, if some crashed merges are detected during system rebooting, such merges will
# be continued; otherwise, the unfinished parts of such merges will not be continued while the
# finished parts still remain as they are.
# If you feel that rebooting is too slow, set this to false. Default: false.
continue_merge_after_reboot=false
# A global merge will be performed each such interval, that is, each storage group will be merged
# (if proper merge candidates can be found). Unit: second, default: 1 hour.
# When less than or equal to 0, timed merge is disabled.
merge_interval_sec=3600
# When set to true, all merges become full merges (the whole SeqFiles are re-written regardless of how
# much they are overflowed). This may increase merge overhead depending on how much the SeqFiles
# are overflowed.
force_full_merge=false
# During a merge, if a chunk has fewer points than this parameter, it will be
# merged with its succeeding chunks even if it is not overflowed, until the merged chunks reach
# this threshold, and then the new chunk will be flushed.
# When less than 0, this mechanism is disabled.
chunk_merge_point_threshold=20480
####################
### Metadata Cache Configuration
......
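
As a quick illustration of how the new merge keys might be consumed outside the server, here is a minimal sketch using plain java.util.Properties. The conf/iotdb-engine.properties path is an assumption; inside IoTDB these keys are loaded by IoTDBDescriptor (see the hunk further below):

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class MergePropertiesSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    // Path is illustrative; the server resolves the file via $IOTDB_CONF.
    try (FileInputStream in = new FileInputStream("conf/iotdb-engine.properties")) {
      props.load(in);
    }
    int mergeThreads = Integer.parseInt(props.getProperty("merge_thread_num", "1"));
    long mergeIntervalSec = Long.parseLong(props.getProperty("merge_interval_sec", "3600"));
    boolean fullMerge = Boolean.parseBoolean(props.getProperty("force_full_merge", "false"));
    System.out.println("merge threads=" + mergeThreads
        + ", interval=" + mergeIntervalSec + "s, full merge=" + fullMerge);
  }
}
```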
@REM
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM
@echo off
echo ````````````````````````
echo Starting Printing the TsFileResources
echo ````````````````````````
PATH %PATH%;%JAVA_HOME%\bin\
set "FULL_VERSION="
set "MAJOR_VERSION="
set "MINOR_VERSION="
for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
set "FULL_VERSION=%%j-%%k-%%l-%%m"
IF "%%j" == "1" (
set "MAJOR_VERSION=%%k"
set "MINOR_VERSION=%%l"
) else (
set "MAJOR_VERSION=%%j"
set "MINOR_VERSION=%%k"
)
)
set JAVA_VERSION=%MAJOR_VERSION%
IF NOT %JAVA_VERSION% == 8 (
IF NOT %JAVA_VERSION% == 11 (
echo IoTDB only supports jdk8 or jdk11, please check your java version.
goto finally
)
)
if "%OS%" == "Windows_NT" setlocal
pushd %~dp0..
if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd%
popd
set IOTDB_CONF=%IOTDB_HOME%\conf
set IOTDB_LOGS=%IOTDB_HOME%\logs
if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.db.tools.TsFileResourcePrinter
if NOT DEFINED JAVA_HOME goto :err
@REM -----------------------------------------------------------------------------
@REM JVM Opts we'll use in legacy run or installation
set JAVA_OPTS=-ea^
-Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^
-DIOTDB_HOME="%IOTDB_HOME%"^
-DTSFILE_HOME="%IOTDB_HOME%"^
-DIOTDB_CONF="%IOTDB_CONF%"
@REM ***** CLASSPATH library setting *****
@REM Ensure that any user defined CLASSPATH variables are not used on startup
set CLASSPATH="%IOTDB_HOME%\lib"
REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
for %%i in ("%IOTDB_HOME%\lib\*.jar") do call :append "%%i"
set CLASSPATH=%CLASSPATH%
goto okClasspath
:append
set CLASSPATH=%CLASSPATH%;%1
goto :eof
REM -----------------------------------------------------------------------------
:okClasspath
rem echo CLASSPATH: %CLASSPATH%
set IOTDB_DATA=%IOTDB_HOME%\data
goto finally
:err
echo JAVA_HOME environment variable must be set!
pause
@REM -----------------------------------------------------------------------------
:finally
pause
ENDLOCAL
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
echo ---------------------
echo Starting Printing the TsFileResources
echo ---------------------
if [ -z "${IOTDB_HOME}" ]; then
export IOTDB_HOME="`dirname "$0"`/.."
fi
IOTDB_CONF=${IOTDB_HOME}/conf
if [ -n "$JAVA_HOME" ]; then
for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
if [ -x "$java" ]; then
JAVA="$java"
break
fi
done
else
JAVA=java
fi
if [ -z $JAVA ] ; then
echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
exit 1;
fi
CLASSPATH=""
for f in ${IOTDB_HOME}/lib/*.jar; do
CLASSPATH=${CLASSPATH}":"$f
done
classname=org.apache.iotdb.db.tools.TsFileResourcePrinter
launch_service()
{
class="$1"
iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml"
iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}"
iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}"
iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}"
# iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB"
exec "$JAVA" $iotdb_parms $IOTDB_JMX_OPTS $iotdb_parms -cp "$CLASSPATH" "$class" "$WALPATH"
return $?
}
# Start up the service
#launch_service "$classname"
if [ ! -d ${IOTDB_HOME}/data/wal ]; then
echo "Can't find wal directory."
exit 1;
else
WALPATH=${IOTDB_HOME}/data/wal
launch_service "$classname"
fi
exit $?
......@@ -119,7 +119,7 @@ ArrayList<ParseError> errors = new ArrayList<ParseError>();
private static HashMap<String, String> xlateMap;
static {
//this is used to support auto completion in CLI
//this is used to support auto completion in Client
xlateMap = new HashMap<String, String>();
// Keywords
......
......@@ -23,6 +23,7 @@ import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.iotdb.db.engine.merge.selector.MergeFileStrategy;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.service.TSServiceImpl;
import org.slf4j.Logger;
......@@ -48,6 +49,11 @@ public class IoTDBConfig {
*/
private int rpcPort = 6667;
/**
* Max concurrent client number
*/
private int maxConcurrentClientNum = 65535;
/**
* Memory allocated for the read process
*/
......@@ -133,12 +139,6 @@ public class IoTDBConfig {
*/
private int maxMemtableNumber = 20;
/**
* The maximum concurrent thread number for merging. When the value <=0 or > CPU core number, use
* the CPU core number.
*/
private int mergeConcurrentThreads = Runtime.getRuntime().availableProcessors();
/**
* The amount of data that is read every time when IoTDB merges data.
*/
......@@ -248,6 +248,57 @@ public class IoTDBConfig {
*/
private boolean chunkBufferPoolEnable = false;
/**
* How much memory (in bytes) can be used by a single merge task.
*/
private long mergeMemoryBudget = (long) (Runtime.getRuntime().maxMemory() * 0.2);
/**
* How many threads will be set up to perform main merge tasks.
*/
private int mergeThreadNum = 1;
/**
* How many threads will be set up to perform merge chunk sub-tasks.
*/
private int mergeChunkSubThreadNum = 4;
/**
* If one merge file selection runs for longer than this time, it will be ended and its current
* selection will be used as the final selection. Unit: millis.
* When < 0, the selection time is unbounded.
*/
private long mergeFileSelectionTimeBudget = 30 * 1000;
/**
* When set to true, if some crashed merges are detected during system rebooting, such merges will
* be continued, otherwise, the unfinished parts of such merges will not be continued while the
* finished parts still remain as they are.
*/
private boolean continueMergeAfterReboot = true;
/**
* A global merge will be performed each such interval, that is, each storage group will be merged
* (if proper merge candidates can be found). Unit: second.
*/
private long mergeIntervalSec = 2 * 3600L;
/**
* When set to true, all merges become full merges (the whole SeqFiles are re-written regardless of how
* much they are overflowed). This may increase merge overhead depending on how much the SeqFiles
* are overflowed.
*/
private boolean forceFullMerge = false;
/**
* During a merge, if a chunk has fewer points than this parameter, it will be
* merged with its succeeding chunks even if it is not overflowed, until the merged chunks reach
* this threshold, and then the new chunk will be flushed.
*/
private int chunkMergePointThreshold = 20480;
private MergeFileStrategy mergeFileStrategy = MergeFileStrategy.MAX_SERIES_NUM;
public IoTDBConfig() {
// empty constructor
}
......@@ -410,14 +461,6 @@ public class IoTDBConfig {
this.indexFileDir = indexFileDir;
}
public int getMergeConcurrentThreads() {
return mergeConcurrentThreads;
}
void setMergeConcurrentThreads(int mergeConcurrentThreads) {
this.mergeConcurrentThreads = mergeConcurrentThreads;
}
public int getFetchSize() {
return fetchSize;
}
......@@ -470,6 +513,14 @@ public class IoTDBConfig {
this.enableStatMonitor = enableStatMonitor;
}
public int getMaxConcurrentClientNum() {
return maxConcurrentClientNum;
}
public void setMaxConcurrentClientNum(int maxConcurrentClientNum) {
this.maxConcurrentClientNum = maxConcurrentClientNum;
}
public int getStatMonitorDetectFreqSec() {
return statMonitorDetectFreqSec;
}
......@@ -582,6 +633,38 @@ public class IoTDBConfig {
this.chunkBufferPoolEnable = chunkBufferPoolEnable;
}
public long getMergeMemoryBudget() {
return mergeMemoryBudget;
}
public void setMergeMemoryBudget(long mergeMemoryBudget) {
this.mergeMemoryBudget = mergeMemoryBudget;
}
public int getMergeThreadNum() {
return mergeThreadNum;
}
public void setMergeThreadNum(int mergeThreadNum) {
this.mergeThreadNum = mergeThreadNum;
}
public boolean isContinueMergeAfterReboot() {
return continueMergeAfterReboot;
}
public void setContinueMergeAfterReboot(boolean continueMergeAfterReboot) {
this.continueMergeAfterReboot = continueMergeAfterReboot;
}
public long getMergeIntervalSec() {
return mergeIntervalSec;
}
public void setMergeIntervalSec(long mergeIntervalSec) {
this.mergeIntervalSec = mergeIntervalSec;
}
public boolean isEnableParameterAdapter() {
return enableParameterAdapter;
}
......@@ -630,6 +713,22 @@ public class IoTDBConfig {
this.performanceStatMemoryInKB = performanceStatMemoryInKB;
}
public boolean isForceFullMerge() {
return forceFullMerge;
}
public void setForceFullMerge(boolean forceFullMerge) {
this.forceFullMerge = forceFullMerge;
}
public int getChunkMergePointThreshold() {
return chunkMergePointThreshold;
}
public void setChunkMergePointThreshold(int chunkMergePointThreshold) {
this.chunkMergePointThreshold = chunkMergePointThreshold;
}
public long getMemtableSizeThreshold() {
return memtableSizeThreshold;
}
......@@ -637,6 +736,31 @@ public class IoTDBConfig {
public void setMemtableSizeThreshold(long memtableSizeThreshold) {
this.memtableSizeThreshold = memtableSizeThreshold;
}
public MergeFileStrategy getMergeFileStrategy() {
return mergeFileStrategy;
}
public void setMergeFileStrategy(
MergeFileStrategy mergeFileStrategy) {
this.mergeFileStrategy = mergeFileStrategy;
}
public int getMergeChunkSubThreadNum() {
return mergeChunkSubThreadNum;
}
public void setMergeChunkSubThreadNum(int mergeChunkSubThreadNum) {
this.mergeChunkSubThreadNum = mergeChunkSubThreadNum;
}
public long getMergeFileSelectionTimeBudget() {
return mergeFileSelectionTimeBudget;
}
public void setMergeFileSelectionTimeBudget(long mergeFileSelectionTimeBudget) {
this.mergeFileSelectionTimeBudget = mergeFileSelectionTimeBudget;
}
public boolean isRpcThriftCompressionEnable() {
return rpcThriftCompressionEnable;
......
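
The same settings can also be adjusted programmatically through the getters and setters added above. A minimal sketch, with illustrative values, obtaining the config through IoTDBDescriptor as elsewhere in this commit:

```java
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;

public class MergeConfigSketch {
  static void tuneMergeSettings() {
    IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
    config.setMergeThreadNum(2);               // main merge task threads
    config.setMergeChunkSubThreadNum(8);       // chunk sub-task threads per merge
    config.setMergeIntervalSec(2 * 3600L);     // timed global merge every two hours
    config.setForceFullMerge(true);            // rewrite whole SeqFiles when merging
    config.setChunkMergePointThreshold(20480); // merge small chunks forward
  }
}
```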
......@@ -26,7 +26,7 @@ public class IoTDBConstant {
public static final String ENV_FILE_NAME = "iotdb-env";
public static final String IOTDB_CONF = "IOTDB_CONF";
public static final String GLOBAL_DB_NAME = "IoTDB";
public static final String VERSION = "0.7.0";
public static final String VERSION = "0.9.0-SNAPSHOT";
public static final String REMOTE_JMX_PORT_NAME = "com.sun.management.jmxremote.port";
public static final String IOTDB_LOCAL_JMX_PORT_NAME = "iotdb.jmx.local.port";
public static final String IOTDB_REMOTE_JMX_PORT_NAME = "iotdb.jmx.remote.port";
......
......@@ -170,14 +170,6 @@ public class IoTDBDescriptor {
conf.setMultiDirStrategyClassName(properties.getProperty("multi_dir_strategy",
conf.getMultiDirStrategyClassName()));
conf.setMergeConcurrentThreads(Integer
.parseInt(properties.getProperty("merge_concurrent_threads",
Integer.toString(conf.getMergeConcurrentThreads()))));
if (conf.getMergeConcurrentThreads() <= 0
|| conf.getMergeConcurrentThreads() > Runtime.getRuntime().availableProcessors()) {
conf.setMergeConcurrentThreads(Runtime.getRuntime().availableProcessors());
}
conf.setFetchSize(Integer.parseInt(properties.getProperty("fetch_size",
Integer.toString(conf.getFetchSize()))));
......@@ -231,6 +223,25 @@ public class IoTDBDescriptor {
conf.setZoneID(ZoneId.of(tmpTimeZone.trim()));
logger.info("Time zone has been set to {}", conf.getZoneID());
conf.setMergeMemoryBudget(Long.parseLong(properties.getProperty("merge_memory_budget",
Long.toString(conf.getMergeMemoryBudget()))));
conf.setMergeThreadNum(Integer.parseInt(properties.getProperty("merge_thread_num",
Integer.toString(conf.getMergeThreadNum()))));
conf.setMergeChunkSubThreadNum(Integer.parseInt(properties.getProperty
("merge_chunk_subthread_num",
Integer.toString(conf.getMergeChunkSubThreadNum()))));
conf.setContinueMergeAfterReboot(Boolean.parseBoolean(properties.getProperty(
"continue_merge_after_reboot", Boolean.toString(conf.isContinueMergeAfterReboot()))));
conf.setMergeFileSelectionTimeBudget(Long.parseLong(properties.getProperty
("merge_fileSelection_time_budget",
Long.toString(conf.getMergeFileSelectionTimeBudget()))));
conf.setMergeIntervalSec(Long.parseLong(properties.getProperty("merge_interval_sec",
Long.toString(conf.getMergeIntervalSec()))));
conf.setForceFullMerge(Boolean.parseBoolean(properties.getProperty("force_full_merge",
Boolean.toString(conf.isForceFullMerge()))));
conf.setChunkMergePointThreshold(Integer.parseInt(properties.getProperty(
"chunk_merge_point_threshold", Integer.toString(conf.getChunkMergePointThreshold()))));
conf.setEnablePerformanceStat(Boolean
.parseBoolean(properties.getProperty("enable_performance_stat",
Boolean.toString(conf.isEnablePerformanceStat())).trim()));
......@@ -241,6 +252,15 @@ public class IoTDBDescriptor {
conf.setPerformanceStatMemoryInKB(Integer
.parseInt(properties.getProperty("performance_stat_memory_in_kb",
Integer.toString(conf.getPerformanceStatMemoryInKB())).trim()));
int maxConcurrentClientNum = Integer.parseInt(properties.
getProperty("max_concurrent_client_num",
Integer.toString(conf.getMaxConcurrentClientNum()).trim()));
if (maxConcurrentClientNum <= 0) {
maxConcurrentClientNum = 65535;
}
conf.setMaxConcurrentClientNum(maxConcurrentClientNum);
} catch (IOException e) {
logger.warn("Cannot load config file because, use default configuration", e);
} catch (Exception e) {
......
......@@ -253,7 +253,7 @@ public class Measurement implements MeasurementMBean, IService {
service = IoTDBThreadPoolFactory.newScheduledThreadPool(
2, ThreadName.TIME_COST_STATSTIC.getName());
}
//we have to check again because someone may channge the value.
//we have to check again because someone may change the value.
isEnableStat = IoTDBDescriptor.getInstance().getConfig().isEnablePerformanceStat();
if (isEnableStat) {
consumeFuture = service.schedule(new QueueConsumerThread(), 0, TimeUnit.MILLISECONDS);
......
......@@ -19,8 +19,9 @@
package org.apache.iotdb.db.cost.statistic;
public enum Operation {
EXECUTE_BATCH("EXECUTE_BATCH"),
EXECUTE_JDBC_BATCH("EXECUTE_JDBC_BATCH"),
EXECUTE_ONE_SQL_IN_BATCH("EXECUTE_ONE_SQL_IN_BATCH"),
EXECUTE_RPC_BATCH_INSERT("EXECUTE_RPC_BATCH_INSERT"),
EXECUTE_QUERY("EXECUTE_QUERY");
public String getName() {
......
......@@ -37,8 +37,10 @@ import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.StorageEngineFailureException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.qp.physical.crud.BatchInsertPlan;
import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.control.JobFileManager;
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.ServiceType;
import org.apache.iotdb.db.utils.FilePathUtils;
......@@ -148,7 +150,7 @@ public class StorageEngine implements IService {
/**
* execute an InsertPlan on a storage group.
* insert an InsertPlan into a storage group.
*
* @param insertPlan physical plan of insertion
* @return true if and only if this insertion succeeds
......@@ -169,6 +171,25 @@ public class StorageEngine implements IService {
return storageGroupProcessor.insert(insertPlan);
}
/**
* insert a BatchInsertPlan into a storage group
* @return result of each row
*/
public Integer[] insertBatch(BatchInsertPlan batchInsertPlan) throws StorageEngineException {
StorageGroupProcessor storageGroupProcessor;
try {
storageGroupProcessor = getProcessor(batchInsertPlan.getDeviceId());
} catch (Exception e) {
logger.warn("get StorageGroupProcessor of device {} failed, because {}",
batchInsertPlan.getDeviceId(),
e.getMessage(), e);
throw new StorageEngineException(e);
}
// TODO monitor: update statistics
return storageGroupProcessor.insertBatch(batchInsertPlan);
}
/**
* only for unit test
*/
......@@ -209,37 +230,17 @@ public class StorageEngine implements IService {
}
}
/**
* begin a query on a given deviceId. Any TsFile contains such device should not be deleted at
* once after merge.
*
* @param deviceId queried deviceId
* @return a token for the query.
*/
public int beginQuery(String deviceId) throws StorageEngineException {
// TODO implement it when developing the merge function
return -1;
}
/**
* end query on a given deviceId. If some TsFile has been merged and this query is the last query
* using it, the TsFile can be deleted safely.
*/
public void endQuery(String deviceId, int token) throws StorageEngineException {
// TODO implement it when developing the merge function
}
/**
* query data.
*/
public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context)
public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context,
JobFileManager filePathsManager)
throws StorageEngineException {
//TODO use context.
String deviceId = seriesExpression.getSeriesPath().getDevice();
String measurementId = seriesExpression.getSeriesPath().getMeasurement();
StorageGroupProcessor storageGroupProcessor = getProcessor(deviceId);
return storageGroupProcessor.query(deviceId, measurementId, context);
return storageGroupProcessor.query(deviceId, measurementId, context, filePathsManager);
}
/**
......@@ -284,8 +285,13 @@ public class StorageEngine implements IService {
*
* @throws StorageEngineException StorageEngineException
*/
public void mergeAll() throws StorageEngineException {
// TODO
public void mergeAll(boolean fullMerge) throws StorageEngineException {
if (IoTDBDescriptor.getInstance().getConfig().isReadOnly()) {
throw new StorageEngineException("Current system mode is read only, does not support merge");
}
for (StorageGroupProcessor storageGroupProcessor : processorMap.values()) {
storageGroupProcessor.merge(fullMerge);
}
}
/**
......
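
A minimal sketch of the two entry points added above. How the StorageEngine instance and the BatchInsertPlan are obtained is outside this diff, so both are passed in as parameters:

```java
import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.qp.physical.crud.BatchInsertPlan;

public class StorageEngineSketch {
  static void insertAndMerge(StorageEngine engine, BatchInsertPlan plan)
      throws StorageEngineException {
    Integer[] rowResults = engine.insertBatch(plan); // one result code per row
    engine.mergeAll(false);                          // non-full global merge
    System.out.println("inserted rows: " + rowResults.length);
  }
}
```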
......@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadata;
import org.apache.iotdb.tsfile.file.metadata.TsFileMetaData;
......@@ -85,16 +86,17 @@ public class DeviceMetaDataCache {
/**
* get {@link ChunkMetaData}. THREAD SAFE.
*/
public List<ChunkMetaData> get(String filePath, Path seriesPath)
public List<ChunkMetaData> get(TsFileResource resource, Path seriesPath)
throws IOException {
if (!cacheEnable) {
TsFileMetaData fileMetaData = TsFileMetaDataCache.getInstance().get(filePath);
TsFileMetaData fileMetaData = TsFileMetaDataCache.getInstance().get(resource);
TsDeviceMetadata deviceMetaData = TsFileMetadataUtils
.getTsDeviceMetaData(filePath, seriesPath, fileMetaData);
.getTsDeviceMetaData(resource, seriesPath, fileMetaData);
return TsFileMetadataUtils.getChunkMetaDataList(seriesPath.getMeasurement(), deviceMetaData);
}
StringBuilder builder = new StringBuilder(filePath).append(".").append(seriesPath.getDevice());
StringBuilder builder = new StringBuilder(resource.getFile().getPath()).append(".").append(seriesPath
.getDevice());
String pathDeviceStr = builder.toString();
String key = builder.append(".").append(seriesPath.getMeasurement()).toString();
Object devicePathObject = pathDeviceStr.intern();
......@@ -123,9 +125,9 @@ public class DeviceMetaDataCache {
logger.debug("Cache didn't hit: the number of requests for cache is {}",
cacheRequestNum.get());
}
TsFileMetaData fileMetaData = TsFileMetaDataCache.getInstance().get(filePath);
TsFileMetaData fileMetaData = TsFileMetaDataCache.getInstance().get(resource);
TsDeviceMetadata deviceMetaData = TsFileMetadataUtils
.getTsDeviceMetaData(filePath, seriesPath, fileMetaData);
.getTsDeviceMetaData(resource, seriesPath, fileMetaData);
// If measurement isn't included in the tsfile, empty list is returned.
if (deviceMetaData == null) {
return new ArrayList<>();
......@@ -193,4 +195,10 @@ public class DeviceMetaDataCache {
private static final DeviceMetaDataCache INSTANCE = new
DeviceMetaDataCache(MEMORY_THRESHOLD_IN_B);
}
public void remove(TsFileResource resource) {
synchronized (lruCache) {
lruCache.entrySet().removeIf(e -> e.getKey().startsWith(resource.getFile().getPath()));
}
}
}
\ No newline at end of file
......@@ -22,7 +22,9 @@ import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.tsfile.file.metadata.TsFileMetaData;
import org.apache.iotdb.tsfile.read.common.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -39,7 +41,7 @@ public class TsFileMetaDataCache {
/**
* key: Tsfile path. value: TsFileMetaData
*/
private LRULinkedHashMap<String, TsFileMetaData> cache;
private LRULinkedHashMap<TsFileResource, TsFileMetaData> cache;
private AtomicLong cacheHitNum = new AtomicLong();
private AtomicLong cacheRequestNum = new AtomicLong();
......@@ -60,9 +62,9 @@ public class TsFileMetaDataCache {
if (!cacheEnable) {
return;
}
cache = new LRULinkedHashMap<String, TsFileMetaData>(MEMORY_THRESHOLD_IN_B, true) {
cache = new LRULinkedHashMap<TsFileResource, TsFileMetaData>(MEMORY_THRESHOLD_IN_B, true) {
@Override
protected long calEntrySize(String key, TsFileMetaData value) {
protected long calEntrySize(TsFileResource key, TsFileMetaData value) {
if (deviceIndexMapEntrySize == 0 && value.getDeviceMap().size() > 0) {
deviceIndexMapEntrySize = RamUsageEstimator
.sizeOf(value.getDeviceMap().entrySet().iterator().next());
......@@ -74,7 +76,7 @@ public class TsFileMetaDataCache {
long valueSize = value.getDeviceMap().size() * deviceIndexMapEntrySize
+ measurementSchemaEntrySize * value.getMeasurementSchema().size()
+ versionAndCreatebySize;
return key.length() * 2 + valueSize;
return key.getFile().getPath().length() * 2 + valueSize;
}
};
}
......@@ -84,15 +86,16 @@ public class TsFileMetaDataCache {
}
/**
* get the TsFileMetaData for given path.
* get the TsFileMetaData for given TsFile.
*
* @param path -given path
* @param tsFileResource -given TsFile
*/
public TsFileMetaData get(String path) throws IOException {
public TsFileMetaData get(TsFileResource tsFileResource) throws IOException {
if (!cacheEnable) {
return TsFileMetadataUtils.getTsFileMetaData(path);
return TsFileMetadataUtils.getTsFileMetaData(tsFileResource);
}
String path = tsFileResource.getFile().getPath();
Object internPath = path.intern();
cacheRequestNum.incrementAndGet();
synchronized (cache) {
......@@ -118,18 +121,18 @@ public class TsFileMetaDataCache {
logger.debug("Cache didn't hit: the number of requests for cache is {}",
cacheRequestNum.get());
}
TsFileMetaData fileMetaData = TsFileMetadataUtils.getTsFileMetaData(path);
TsFileMetaData fileMetaData = TsFileMetadataUtils.getTsFileMetaData(tsFileResource);
synchronized (cache) {
cache.put(path, fileMetaData);
cache.put(tsFileResource, fileMetaData);
return fileMetaData;
}
}
}
public void remove(String path) {
public void remove(TsFileResource resource) {
synchronized (cache) {
if (cache != null) {
cache.remove(path);
cache.remove(resource);
}
}
}
......
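
To illustrate the new cache key, a minimal sketch of fetching and evicting file metadata by TsFileResource; the import paths are assumed from the surrounding hunks:

```java
import java.io.IOException;
import org.apache.iotdb.db.engine.cache.TsFileMetaDataCache;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.tsfile.file.metadata.TsFileMetaData;

public class MetaDataCacheSketch {
  // After this change the cache is keyed by the TsFileResource rather than the
  // path string, so a merged-away file can be evicted via its resource directly.
  static TsFileMetaData readAndInvalidate(TsFileResource resource) throws IOException {
    TsFileMetaData metaData = TsFileMetaDataCache.getInstance().get(resource);
    TsFileMetaDataCache.getInstance().remove(resource);
    return metaData;
  }
}
```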
......@@ -20,10 +20,12 @@ package org.apache.iotdb.db.engine.cache;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.query.control.FileReaderManager;
import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetaData;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
......@@ -46,30 +48,30 @@ public class TsFileMetadataUtils {
/**
* get tsfile meta data.
*
* @param filePath -given path
* @param resource -given TsFile
* @return -meta data
*/
public static TsFileMetaData getTsFileMetaData(String filePath) throws IOException {
TsFileSequenceReader reader = FileReaderManager.getInstance().get(filePath, true);
public static TsFileMetaData getTsFileMetaData(TsFileResource resource) throws IOException {
TsFileSequenceReader reader = FileReaderManager.getInstance().get(resource, true);
return reader.readFileMetadata();
}
/**
* get row group block meta data.
*
* @param filePath -file path
* @param resource -TsFile
* @param seriesPath -series path
* @param fileMetaData -tsfile meta data
* @return -device meta data
*/
public static TsDeviceMetadata getTsDeviceMetaData(String filePath, Path seriesPath,
public static TsDeviceMetadata getTsDeviceMetaData(TsFileResource resource, Path seriesPath,
TsFileMetaData fileMetaData) throws IOException {
if (!fileMetaData.getMeasurementSchema().containsKey(seriesPath.getMeasurement())) {
return null;
} else {
// get the index information of TsDeviceMetadata
TsDeviceMetadataIndex index = fileMetaData.getDeviceMetadataIndex(seriesPath.getDevice());
TsFileSequenceReader tsFileReader = FileReaderManager.getInstance().get(filePath, true);
TsFileSequenceReader tsFileReader = FileReaderManager.getInstance().get(resource, true);
// read TsDeviceMetadata from file
return tsFileReader.readTsDeviceMetaData(index);
}
......@@ -95,6 +97,9 @@ public class TsFileMetadataUtils {
}
}
}
for (List<ChunkMetaData> chunkMetaDataList : pathToChunkMetaDataList.values()) {
chunkMetaDataList.sort(Comparator.comparingLong(ChunkMetaData::getStartTime));
}
return pathToChunkMetaDataList;
}
......
......@@ -96,7 +96,7 @@ public class FlushManager implements FlushManagerMBean, IService {
}
/**
* Add BufferWriteProcessor to asyncTryToFlush manager
* Add TsFileProcessor to asyncTryToFlush manager
*/
@SuppressWarnings("squid:S2445")
public void registerTsFileProcessor(TsFileProcessor tsFileProcessor) {
......
......@@ -30,7 +30,7 @@ import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
import org.apache.iotdb.tsfile.write.chunk.IChunkWriter;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
import org.slf4j.Logger;
......@@ -50,14 +50,14 @@ public class MemTableFlushTask {
private String storageGroup;
private IMemTable memTable;
private FileSchema fileSchema;
private Schema schema;
private volatile boolean noMoreEncodingTask = false;
private volatile boolean noMoreIOTask = false;
public MemTableFlushTask(IMemTable memTable, FileSchema fileSchema, RestorableTsFileIOWriter writer, String storageGroup) {
public MemTableFlushTask(IMemTable memTable, Schema schema, RestorableTsFileIOWriter writer, String storageGroup) {
this.memTable = memTable;
this.fileSchema = fileSchema;
this.schema = schema;
this.writer = writer;
this.storageGroup = storageGroup;
subTaskPoolManager.submit(encodingTask);
......@@ -78,7 +78,7 @@ public class MemTableFlushTask {
for (String measurementId : memTable.getMemTableMap().get(deviceId).keySet()) {
long startTime = System.currentTimeMillis();
IWritableMemChunk series = memTable.getMemTableMap().get(deviceId).get(measurementId);
MeasurementSchema desc = fileSchema.getMeasurementSchema(measurementId);
MeasurementSchema desc = schema.getMeasurementSchema(measurementId);
TVList tvList = series.getSortedTVList();
sortTime += System.currentTimeMillis() - startTime;
encodingTaskQueue.add(new Pair<>(tvList, desc));
......@@ -168,8 +168,7 @@ public class MemTableFlushTask {
Pair<TVList, MeasurementSchema> encodingMessage = (Pair<TVList, MeasurementSchema>) task;
ChunkBuffer chunkBuffer = ChunkBufferPool.getInstance()
.getEmptyChunkBuffer(this, encodingMessage.right);
IChunkWriter seriesWriter = new ChunkWriterImpl(encodingMessage.right, chunkBuffer,
PAGE_SIZE_THRESHOLD);
IChunkWriter seriesWriter = new ChunkWriterImpl(chunkBuffer, PAGE_SIZE_THRESHOLD);
writeOneSeries(encodingMessage.left, seriesWriter, encodingMessage.right.getType());
ioTaskQueue.add(seriesWriter);
memSerializeTime += System.currentTimeMillis() - starTime;
......
......@@ -26,6 +26,7 @@ import java.util.Map.Entry;
import org.apache.iotdb.db.engine.modification.Deletion;
import org.apache.iotdb.db.engine.modification.Modification;
import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
import org.apache.iotdb.db.qp.physical.crud.BatchInsertPlan;
import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.rescon.TVListAllocator;
import org.apache.iotdb.db.utils.MemUtils;
......@@ -88,6 +89,14 @@ public abstract class AbstractMemTable implements IMemTable {
memSize += recordSizeInByte;
}
@Override
public void insertBatch(BatchInsertPlan batchInsertPlan, List<Integer> indexes) {
write(batchInsertPlan, indexes);
long recordSizeInByte = MemUtils.getRecordSize(batchInsertPlan);
memSize += recordSizeInByte;
}
@Override
public void write(String deviceId, String measurement, TSDataType dataType, long insertTime,
String insertValue) {
......@@ -96,13 +105,15 @@ public abstract class AbstractMemTable implements IMemTable {
}
@Override
public void write(String deviceId, String measurement, TSDataType dataType, long insertTime,
Object value) {
IWritableMemChunk memSeries = createIfNotExistAndGet(deviceId, measurement, dataType);
memSeries.write(insertTime, value);
// update memory size of current memtable
public void write(BatchInsertPlan batchInsertPlan, List<Integer> indexes) {
for (int i = 0; i < batchInsertPlan.getMeasurements().length; i++) {
IWritableMemChunk memSeries = createIfNotExistAndGet(batchInsertPlan.getDeviceId(),
batchInsertPlan.getMeasurements()[i], batchInsertPlan.getDataTypes()[i]);
memSeries.write(batchInsertPlan.getTimes(), batchInsertPlan.getColumns()[i], batchInsertPlan.getDataTypes()[i], indexes);
}
}
@Override
public long size() {
long sum = 0;
......
(This diff has been collapsed.)