Commit bd299243 authored by SilverNarcissus, committed by Jialin Qiao

Fix spark package name (#518)

* update spark package
Parent 7d787906
@@ -168,7 +168,6 @@ Then, the `mapper` and `reducer` class is how you deal with the `MapWritable` pr
```
public static class TSMapper extends Mapper<NullWritable, MapWritable, Text, MapWritable> {
@Override
protected void map(NullWritable key, MapWritable value,
Mapper<NullWritable, MapWritable, Text, MapWritable>.Context context)
......
@@ -49,7 +49,7 @@ mvn clean scala:compile compile install
```
spark-shell --jars spark-iotdb-connector-0.9.0-SNAPSHOT.jar,iotdb-jdbc-0.9.0-SNAPSHOT-jar-with-dependencies.jar
-import org.apache.iotdb.sparkdb._
+import org.apache.iotdb.spark.db._
val df = spark.read.format("org.apache.iotdb.sparkdb").option("url","jdbc:iotdb://127.0.0.1:6667/").option("sql","select * from root").load
@@ -62,7 +62,7 @@ df.show()
```
spark-shell --jars spark-iotdb-connector-0.9.0-SNAPSHOT.jar,iotdb-jdbc-0.9.0-SNAPSHOT-jar-with-dependencies.jar
-import org.apache.iotdb.sparkdb._
+import org.apache.iotdb.spark.db._
val df = spark.read.format("org.apache.iotdb.sparkdb").option("url","jdbc:iotdb://127.0.0.1:6667/").option("sql","select * from root").
option("lowerBound", [lower bound of time that you want query(include)]).option("upperBound", [upper bound of time that you want query(include)]).
@@ -129,7 +129,7 @@ You can also use narrow table form which as follows: (You can see part 4 about h
## from wide to narrow
```
-import org.apache.iotdb.sparkdb._
+import org.apache.iotdb.spark.db._
val wide_df = spark.read.format("org.apache.iotdb.sparkdb").option("url", "jdbc:iotdb://127.0.0.1:6667/").option("sql", "select * from root where time < 1100 and time > 1000").load
val narrow_df = Transformer.toNarrowForm(spark, wide_df)
@@ -137,7 +137,7 @@ val narrow_df = Transformer.toNarrowForm(spark, wide_df)
## from narrow to wide
```
-import org.apache.iotdb.sparkdb._
+import org.apache.iotdb.spark.db._
val wide_df = Transformer.toWideForm(spark, narrow_df)
```
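For context beyond the diff: in this connector's wide form each full timeseries path becomes its own column, while the narrow form keys rows by device with one column per sensor. A rough round-trip sketch in spark-shell, assuming the renamed format string and the column layout described in the connector docs:

```scala
import org.apache.iotdb.spark.db._

// Wide form (assumed layout): Time | root.ln.wf01.wt01.status | root.ln.wf01.wt01.temperature | ...
val wide_df = spark.read.format("org.apache.iotdb.spark.db")
  .option("url", "jdbc:iotdb://127.0.0.1:6667/")
  .option("sql", "select * from root")
  .load

// Narrow form (assumed layout): Time | device_name | status | temperature
val narrow_df = Transformer.toNarrowForm(spark, wide_df)

// Converting back should restore the wide layout.
val wide_df_again = Transformer.toWideForm(spark, narrow_df)
```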
@@ -147,7 +147,7 @@ val wide_df = Transformer.toWideForm(spark, narrow_df)
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
-import org.apache.iotdb.sparkdb.*
+import org.apache.iotdb.spark.db.*
public class Example {
......
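The hunk above truncates the `Example` class. A self-contained sketch of the same program, written here in Scala rather than Java; the URL and query are the sample values used throughout these docs, and the format string assumes the renamed package:

```scala
import org.apache.spark.sql.SparkSession
import org.apache.iotdb.spark.db._

object Example {
  def main(args: Array[String]): Unit = {
    // Local session for demonstration; requires spark-iotdb-connector on the classpath.
    val spark = SparkSession.builder()
      .appName("iotdb-spark-example")
      .master("local[*]")
      .getOrCreate()

    // Read a wide-form DataFrame through the renamed data source.
    val df = spark.read.format("org.apache.iotdb.spark.db")
      .option("url", "jdbc:iotdb://127.0.0.1:6667/")
      .option("sql", "select * from root")
      .load()

    df.printSchema()
    df.show()
    spark.stop()
  }
}
```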
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb;
+package org.apache.iotdb.spark.db;
/**
* this class contains several constants used in SQL.
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
import java.sql.{Statement, _}
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.sources.{BaseRelation, DataSourceRegister, RelationProvider}
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
class IoTDBOptions(
@transient private val parameters: Map[String, String])
......
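The hunk shows only the class header of `IoTDBOptions`; how it consumes the keys passed via `spark.read.option(...)` is not part of the diff. A minimal sketch of that common pattern, using the `url`, `sql`, `lowerBound`, and `upperBound` keys that appear in the docs above (field names and error handling here are assumptions, not the actual implementation):

```scala
package org.apache.iotdb.spark.db

// Illustrative sketch only; not the actual IoTDBOptions implementation.
class IoTDBOptions(
    @transient private val parameters: Map[String, String]) extends Serializable {

  // Required connection string, e.g. "jdbc:iotdb://127.0.0.1:6667/".
  val url: String = parameters.getOrElse("url", sys.error("Option 'url' is required"))

  // Required query, e.g. "select * from root".
  val sql: String = parameters.getOrElse("sql", sys.error("Option 'sql' is required"))

  // Optional inclusive time bounds, matching lowerBound/upperBound in the docs.
  val lowerBound: Option[Long] = parameters.get("lowerBound").map(_.toLong)
  val upperBound: Option[Long] = parameters.get("upperBound").map(_.toLong)
}
```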
@@ -16,15 +16,15 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
-import org.apache.spark.{Partition, SparkContext, TaskContext}
-import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.sources._
import java.sql.{Connection, DriverManager, ResultSet, Statement}
+import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
+import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
+import org.apache.spark.{Partition, SparkContext, TaskContext}
//IoTDB data partition
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
import org.apache.spark.Partition
import org.apache.spark.rdd.RDD
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
import org.apache.spark.sql.{SparkSession, _}
import org.apache.spark.sql.types._
......
@@ -16,13 +16,13 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark
import org.apache.spark.sql.{DataFrame, DataFrameReader}
-package object sparkdb {
+package object db {
-val myPackage = "org.apache.iotdb.sparkdb"
+val myPackage = "org.apache.iotdb.spark.db"
/**
* Adds a method, `iotdb`, to DataFrameReader that allows you to read data from IoTDB using
......
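The comment cut off above says this package object adds an `iotdb` method to `DataFrameReader`; the mechanism itself is not visible in the hunk. One common way to write such an extension, sketched under the assumption that it simply delegates to the renamed data source (the wrapper class and parameter names are illustrative):

```scala
package org.apache.iotdb.spark

import org.apache.spark.sql.{DataFrame, DataFrameReader}

package object db {

  val myPackage = "org.apache.iotdb.spark.db"

  // Sketch: lets callers write spark.read.iotdb(url, sql) instead of
  // spark.read.format(myPackage).option(...).load().
  implicit class IoTDBDataFrameReader(reader: DataFrameReader) {
    def iotdb(url: String, sql: String): DataFrame =
      reader
        .format(myPackage)
        .option("url", url)
        .option("sql", sql)
        .load()
  }
}
```

With something like this in scope, `spark.read.iotdb("jdbc:iotdb://127.0.0.1:6667/", "select * from root")` would read the same DataFrame as the `format(...)` form shown earlier.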
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb;
+package org.apache.iotdb.spark.db;
import java.io.File;
import java.io.IOException;
......
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.iotdb.sparkdb
+package org.apache.iotdb.spark.db
import java.io.ByteArrayOutputStream
......
@@ -33,10 +33,10 @@ package object tsfile {
isNarrowForm: Boolean = false): DataFrame = {
if (isNarrowForm) {
reader.option(DefaultSource.path, path).option(DefaultSource.isNarrowForm, "narrow_form").
format("org.apache.iotdb.tsfile").load
format("org.apache.iotdb.spark.tsfile").load
}
else {
-reader.option(DefaultSource.path, path).format("org.apache.iotdb.tsfile").load
+reader.option(DefaultSource.path, path).format("org.apache.iotdb.spark.tsfile").load
}
}
}
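A short usage sketch for the helper patched in this hunk, assuming it is exposed on `DataFrameReader` (and a matching one on `DataFrameWriter`, per the next hunk) via the `tsfile` package object; the file paths are placeholders:

```scala
import org.apache.iotdb.spark.tsfile._

// Wide-form read through the renamed TsFile format (placeholder path).
val df = spark.read.tsfile("/tmp/example.tsfile")

// Narrow-form read, exercising the isNarrowForm branch shown above.
val narrowDf = spark.read.tsfile("/tmp/example.tsfile", isNarrowForm = true)

// Writing follows the same pattern through the writer-side helper.
narrowDf.write.tsfile("/tmp/example_out.tsfile", isNarrowForm = true)
```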
@@ -49,7 +49,7 @@ package object tsfile {
isNarrowForm: Boolean = false): Unit = {
if (isNarrowForm) {
writer.option(DefaultSource.path, path).option(DefaultSource.isNarrowForm, "narrow_form").
format("org.apache.iotdb.tsfile").save
format("org.apache.iotdb.spark.tsfile").save
}
else {
writer.option(DefaultSource.path, path).format("org.apache.iotdb.tsfile").save
......
@@ -20,6 +20,7 @@ package org.apache.iotdb.spark.tsfile
import java.io.{ByteArrayOutputStream, File}
import java.net.URI
+import java.util
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
@@ -376,8 +377,9 @@ device_2: 400000 rows, time range [0,799998], interval 2
"|131047|device_1 |null |131047 |null |\n" +
"|131048|device_1 |null |131048 |null |\n" +
"|131049|device_1 |null |131049 |null |\n" +
"+------+-----------+--------+--------+--------+\n"
"+------+-----------+--------+--------+--------+"
println("???" + util.Arrays.toString(actual))
Assert.assertArrayEquals(expect.toCharArray, actual.dropRight(2))
reader.close() // DO NOT FORGET THIS
......
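A note on the assertion above (an inference, not stated in the diff): `actual` appears to be the console output of `df.show()` captured as a char array, which carries trailing newline characters, so the expected string now omits its final `\n` and the captured output drops its last two characters before comparison. In miniature:

```scala
// Illustrative only: assumes the captured show() output ends with
// two trailing newline-like characters that the comparison strips.
val captured: Array[Char] = "+---+\n|  1|\n+---+\n\n".toCharArray
val expected: String = "+---+\n|  1|\n+---+"
assert(expected.toCharArray.sameElements(captured.dropRight(2)))
```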