Commit 8a3acb79 authored by Reynold Xin

[SPARK-13794][SQL] Rename DataFrameWriter.stream() to DataFrameWriter.startStream()

## What changes were proposed in this pull request?
The new name makes it more obvious with the verb "start" that we are actually starting some execution.

## How was this patch tested?
This is just a rename. Existing unit tests should cover it.

Author: Reynold Xin <rxin@databricks.com>

Closes #11627 from rxin/SPARK-13794.
Parent: aa0eba2c
...@@ -206,7 +206,7 @@ final class DataFrameWriter private[sql](df: DataFrame) { ...@@ -206,7 +206,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
} }
/** /**
* Specifies the name of the [[ContinuousQuery]] that can be started with `stream()`. * Specifies the name of the [[ContinuousQuery]] that can be started with `startStream()`.
* This name must be unique among all the currently active queries in the associated SQLContext. * This name must be unique among all the currently active queries in the associated SQLContext.
* *
* @since 2.0.0 * @since 2.0.0
...@@ -223,8 +223,8 @@ final class DataFrameWriter private[sql](df: DataFrame) { ...@@ -223,8 +223,8 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* *
* @since 2.0.0 * @since 2.0.0
*/ */
def stream(path: String): ContinuousQuery = { def startStream(path: String): ContinuousQuery = {
option("path", path).stream() option("path", path).startStream()
} }
/** /**
...@@ -234,7 +234,7 @@ final class DataFrameWriter private[sql](df: DataFrame) { ...@@ -234,7 +234,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* *
* @since 2.0.0 * @since 2.0.0
*/ */
def stream(): ContinuousQuery = { def startStream(): ContinuousQuery = {
val dataSource = val dataSource =
DataSource( DataSource(
df.sqlContext, df.sqlContext,
......
...@@ -72,7 +72,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -72,7 +72,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.stream() .stream()
.write .write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.stream() .startStream()
.stop() .stop()
} }
...@@ -82,7 +82,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -82,7 +82,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.stream() .stream()
.write .write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.stream() .startStream()
.stop() .stop()
} }
...@@ -108,7 +108,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -108,7 +108,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.option("opt1", "1") .option("opt1", "1")
.options(Map("opt2" -> "2")) .options(Map("opt2" -> "2"))
.options(map) .options(map)
.stream() .startStream()
.stop() .stop()
assert(LastOptions.parameters("opt1") == "1") assert(LastOptions.parameters("opt1") == "1")
...@@ -123,14 +123,14 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -123,14 +123,14 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
df.write df.write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.stream() .startStream()
.stop() .stop()
assert(LastOptions.partitionColumns == Nil) assert(LastOptions.partitionColumns == Nil)
df.write df.write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.partitionBy("a") .partitionBy("a")
.stream() .startStream()
.stop() .stop()
assert(LastOptions.partitionColumns == Seq("a")) assert(LastOptions.partitionColumns == Seq("a"))
...@@ -138,7 +138,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -138,7 +138,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
df.write df.write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.partitionBy("A") .partitionBy("A")
.stream() .startStream()
.stop() .stop()
assert(LastOptions.partitionColumns == Seq("a")) assert(LastOptions.partitionColumns == Seq("a"))
} }
...@@ -147,7 +147,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -147,7 +147,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
df.write df.write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.partitionBy("b") .partitionBy("b")
.stream() .startStream()
.stop() .stop()
} }
} }
...@@ -163,7 +163,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -163,7 +163,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
df.write df.write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.stream("/test") .startStream("/test")
.stop() .stop()
assert(LastOptions.parameters("path") == "/test") assert(LastOptions.parameters("path") == "/test")
...@@ -187,7 +187,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -187,7 +187,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.option("intOpt", 56) .option("intOpt", 56)
.option("boolOpt", false) .option("boolOpt", false)
.option("doubleOpt", 6.7) .option("doubleOpt", 6.7)
.stream("/test") .startStream("/test")
.stop() .stop()
assert(LastOptions.parameters("intOpt") == "56") assert(LastOptions.parameters("intOpt") == "56")
...@@ -205,7 +205,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -205,7 +205,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.write .write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.queryName(name) .queryName(name)
.stream() .startStream()
} }
/** Start a query without specifying a name */ /** Start a query without specifying a name */
...@@ -215,7 +215,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B ...@@ -215,7 +215,7 @@ class DataFrameReaderWriterSuite extends StreamTest with SharedSQLContext with B
.stream("/test") .stream("/test")
.write .write
.format("org.apache.spark.sql.streaming.test") .format("org.apache.spark.sql.streaming.test")
.stream() .startStream()
} }
/** Get the names of active streams */ /** Get the names of active streams */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment