Commit 064523e2 authored by: Q qiaojialin

rename FileSchema to Schema

Parent 6ce1f413
......@@ -85,12 +85,12 @@ A TsFile can be generated by following three steps and the complete code will be
```
* With pre-defined schema
```
public TsFileWriter(File file, FileSchema schema) throws IOException
public TsFileWriter(File file, Schema schema) throws IOException
```
This one is for using the HDFS file system. `TsFileOutput` can be an instance of class `HDFSOutput`.
```
public TsFileWriter(TsFileOutput output, FileSchema schema) throws IOException
public TsFileWriter(TsFileOutput output, Schema schema) throws IOException
```
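For example, a minimal usage sketch of the constructor with a pre-defined schema (the file path and measurement name are hypothetical and not part of this commit; the `TsFileOutput`/`HDFSOutput` variant is used the same way):
```
// hedged sketch; assumes the usual TsFile imports such as
// org.apache.iotdb.tsfile.write.TsFileWriter and
// org.apache.iotdb.tsfile.write.schema.Schema / MeasurementSchema
Schema schema = new Schema();
schema.registerMeasurement(
    new MeasurementSchema("sensor_1", TSDataType.FLOAT, TSEncoding.RLE));
TsFileWriter writer = new TsFileWriter(new File("test.tsfile"), schema);
// ... add data points, then close to flush the file
writer.close();
```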
**Parameters:**
......@@ -100,15 +100,15 @@ A TsFile can be generated by following three steps and the complete code will be
* Second, add measurements
Or you can make an instance of class `FileSchema` first and pass this to the constructor of class `TsFileWriter`
Or you can make an instance of class `Schema` first and pass this to the constructor of class `TsFileWriter`
The class `FileSchema` contains a map whose key is the name of one measurement schema, and the value is the schema itself.
The class `Schema` contains a map whose key is the name of one measurement schema, and the value is the schema itself.
Here are the interfaces:
```
// Create an empty FileSchema or from an existing map
public FileSchema()
public FileSchema(Map<String, MeasurementSchema> measurements)
// Create an empty Schema or from an existing map
public Schema()
public Schema(Map<String, MeasurementSchema> measurements)
// Use these two interfaces to add measurements
public void registerMeasurement(MeasurementSchema descriptor)
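// Illustration only (not part of this commit): building a Schema from an existing map,
// as an alternative to registerMeasurement; the measurement names are hypothetical and
// java.util.Map / java.util.HashMap are assumed to be imported.
Map<String, MeasurementSchema> measurements = new HashMap<>();
measurements.put("sensor_1", new MeasurementSchema("sensor_1", TSDataType.FLOAT, TSEncoding.RLE));
measurements.put("sensor_2", new MeasurementSchema("sensor_2", TSDataType.INT32, TSEncoding.TS_2DIFF));
Schema schemaFromMap = new Schema(measurements);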
......@@ -275,7 +275,7 @@ public class TsFileWriteWithRowBatch {
f.delete();
}
FileSchema schema = new FileSchema();
Schema schema = new Schema();
// the number of rows to include in the row batch
int rowNum = 1000000;
......
......@@ -30,7 +30,7 @@ import org.apache.iotdb.tsfile.hadoop.io.HDFSOutputStream;
import org.apache.iotdb.tsfile.timeseries.basis.TsFile;
import org.apache.iotdb.tsfile.write.exception.InvalidJsonSchemaException;
import org.apache.iotdb.tsfile.write.exception.WriteProcessException;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
import org.apache.iotdb.tsfile.write.schema.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -42,9 +42,9 @@ public class TSFRecordWriter extends RecordWriter<NullWritable, TSRow> {
public TSFRecordWriter(Path path, JSONObject schema) throws InterruptedException, IOException {
// construct the internal record writer
FileSchema fileSchema = null;
Schema fileSchema = null;
try {
fileSchema = new FileSchema(schema);
fileSchema = new Schema(schema);
} catch (InvalidJsonSchemaException e) {
throw new InterruptedException("Construct the tsfile schema failed: " + e.getMessage());
}
......
......@@ -164,7 +164,7 @@ public class StorageGroupProcessor {
this.storageGroupName = storageGroupName;
// construct the file schema
this.schema = constructFileSchema(storageGroupName);
this.schema = constructSchema(storageGroupName);
try {
File storageGroupSysDir = new File(systemInfoDir, storageGroupName);
......@@ -249,7 +249,7 @@ public class StorageGroupProcessor {
}
}
private Schema constructFileSchema(String storageGroupName) {
private Schema constructSchema(String storageGroupName) {
List<MeasurementSchema> columnSchemaList;
columnSchemaList = MManager.getInstance().getSchemaForStorageGroup(storageGroupName);
......
......@@ -25,9 +25,9 @@ import org.apache.iotdb.tsfile.write.schema.Schema;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
public class FileSchemaUtils {
public class SchemaUtils {
private FileSchemaUtils(){}
private SchemaUtils(){}
/**
* Construct the Schema of the FileNode named processorName.
......@@ -35,19 +35,19 @@ public class FileSchemaUtils {
* @return the schema of the FileNode named processorName.
* @throws WriteProcessException when the fileSchema cannot be created.
*/
public static Schema constructFileSchema(String processorName) {
public static Schema constructSchema(String processorName) {
List<MeasurementSchema> columnSchemaList;
columnSchemaList = MManager.getInstance().getSchemaForStorageGroup(processorName);
return getFileSchemaFromColumnSchema(columnSchemaList);
return getSchemaFromColumnSchema(columnSchemaList);
}
/**
getFileSchemaFromColumnSchema constructs a Schema using the schema of the columns and the
getSchemaFromColumnSchema constructs a Schema using the schema of the columns and the
* device type.
* @param schemaList the schema of the columns in this file.
* @return a Schema contains the provided schemas.
*/
public static Schema getFileSchemaFromColumnSchema(List<MeasurementSchema> schemaList) {
public static Schema getSchemaFromColumnSchema(List<MeasurementSchema> schemaList) {
Schema schema = new Schema();
for (MeasurementSchema measurementSchema : schemaList) {
schema.registerMeasurement(measurementSchema);
......
......@@ -36,7 +36,7 @@ import org.apache.iotdb.db.exception.TsFileProcessorException;
import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.db.utils.FileSchemaUtils;
import org.apache.iotdb.db.utils.SchemaUtils;
import org.apache.iotdb.db.utils.TimeValuePair;
import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetaData;
......@@ -77,7 +77,7 @@ public class TsFileProcessorTest {
public void testWriteAndFlush()
throws WriteProcessException, IOException, TsFileProcessorException {
processor = new TsFileProcessor(storageGroup, new File(filePath),
FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
},
() -> true, true);
......@@ -124,7 +124,7 @@ public class TsFileProcessorTest {
public void testWriteAndRestoreMetadata()
throws IOException {
processor = new TsFileProcessor(storageGroup, new File(filePath),
FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
},
() -> true, true);
......@@ -191,7 +191,7 @@ public class TsFileProcessorTest {
public void testMultiFlush()
throws WriteProcessException, IOException, TsFileProcessorException {
processor = new TsFileProcessor(storageGroup, new File(filePath),
FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
},
() -> true, true);
......@@ -227,7 +227,7 @@ public class TsFileProcessorTest {
public void testWriteAndClose()
throws WriteProcessException, IOException {
processor = new TsFileProcessor(storageGroup, new File(filePath),
FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE,
SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE,
unsealedTsFileProcessor -> {
TsFileResource resource = unsealedTsFileProcessor.getTsFileResource();
synchronized (resource) {
......
......@@ -49,7 +49,7 @@ public class ReadPageInMemTest {
private int pageCheckSizeThreshold;
private int defaultMaxStringLength;
private static Schema getFileSchema() {
private static Schema getSchema() {
Schema schema = new Schema();
TSFileConfig conf = TSFileDescriptor.getInstance().getConfig();
schema.registerMeasurement(new MeasurementSchema("s1", TSDataType.INT32, TSEncoding.valueOf(conf.valueEncoder)));
......@@ -70,7 +70,7 @@ public class ReadPageInMemTest {
conf.pageCheckSizeThreshold = 1;
defaultMaxStringLength = conf.maxStringLength;
conf.maxStringLength = 2;
schema = getFileSchema();
schema = getSchema();
innerWriter = new TsFileWriter(new File(filePath), schema, conf);
}
......