Unverified commit 3c6df778, authored by Jark Wu

[FLINK-16991][table-planner-blink] Rename current LogicalSink into LogicalLegacySink

Parent 78b516ca
......@@ -1749,11 +1749,11 @@ the result of multiple-sinks plan is
{% highlight text %}
== Abstract Syntax Tree ==
-LogicalSink(name=[MySink1], fields=[count, word])
+LogicalLegacySink(name=[MySink1], fields=[count, word])
+- LogicalFilter(condition=[LIKE($1, _UTF-16LE'F%')])
+- LogicalTableScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]])
-LogicalSink(name=[MySink2], fields=[count, word])
+LogicalLegacySink(name=[MySink2], fields=[count, word])
+- LogicalUnion(all=[true])
:- LogicalFilter(condition=[LIKE($1, _UTF-16LE'F%')])
: +- LogicalTableScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]])
......@@ -1763,10 +1763,10 @@ LogicalSink(name=[MySink2], fields=[count, word])
Calc(select=[count, word], where=[LIKE(word, _UTF-16LE'F%')], reuse_id=[1])
+- TableSourceScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]], fields=[count, word])
-Sink(name=[MySink1], fields=[count, word])
+LegacySink(name=[MySink1], fields=[count, word])
+- Reused(reference_id=[1])
-Sink(name=[MySink2], fields=[count, word])
+LegacySink(name=[MySink2], fields=[count, word])
+- Union(all=[true], union=[count, word])
:- Reused(reference_id=[1])
+- TableSourceScan(table=[[default_catalog, default_database, MySource2, source: [CsvTableSource(read fields: count, word)]]], fields=[count, word])
......
......@@ -1730,11 +1730,11 @@ print(explanation)
{% highlight text %}
== Abstract Syntax Tree ==
-LogicalSink(name=[MySink1], fields=[count, word])
+LogicalLegacySink(name=[MySink1], fields=[count, word])
+- LogicalFilter(condition=[LIKE($1, _UTF-16LE'F%')])
+- LogicalTableScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]])
-LogicalSink(name=[MySink2], fields=[count, word])
+LogicalLegacySink(name=[MySink2], fields=[count, word])
+- LogicalUnion(all=[true])
:- LogicalFilter(condition=[LIKE($1, _UTF-16LE'F%')])
: +- LogicalTableScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]])
......@@ -1744,10 +1744,10 @@ LogicalSink(name=[MySink2], fields=[count, word])
Calc(select=[count, word], where=[LIKE(word, _UTF-16LE'F%')], reuse_id=[1])
+- TableSourceScan(table=[[default_catalog, default_database, MySource1, source: [CsvTableSource(read fields: count, word)]]], fields=[count, word])
-Sink(name=[MySink1], fields=[count, word])
+LegacySink(name=[MySink1], fields=[count, word])
+- Reused(reference_id=[1])
-Sink(name=[MySink2], fields=[count, word])
+LegacySink(name=[MySink2], fields=[count, word])
+- Union(all=[true], union=[count, word])
:- Reused(reference_id=[1])
+- TableSourceScan(table=[[default_catalog, default_database, MySource2, source: [CsvTableSource(read fields: count, word)]]], fields=[count, word])
......
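For orientation, the renamed `LogicalLegacySink`/`LegacySink` nodes above are exactly what `explain()` prints for a multi-sink job. A minimal sketch of producing such a plan, assuming Flink 1.11-era APIs and that the tables named in the plans (MySource1, MySource2, MySink1, MySink2) were registered beforehand; the DDL step itself is not part of this commit:

{% highlight scala %}
import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object ExplainMultiSink {
  def main(args: Array[String]): Unit = {
    val settings = EnvironmentSettings.newInstance().inBatchMode().build()
    val tEnv = TableEnvironment.create(settings)
    // MySource1, MySource2, MySink1 and MySink2 are assumed to be registered
    // already, e.g. via tEnv.executeSql("CREATE TABLE ...").
    val stmtSet = tEnv.createStatementSet()
    stmtSet.addInsertSql(
      "INSERT INTO MySink1 SELECT * FROM MySource1 WHERE word LIKE 'F%'")
    stmtSet.addInsertSql(
      "INSERT INTO MySink2 " +
        "SELECT * FROM MySource1 WHERE word LIKE 'F%' " +
        "UNION ALL SELECT * FROM MySource2")
    // Prints the sections shown above: the AST (with LogicalLegacySink nodes)
    // and the optimized plan (with LegacySink nodes and reuse_id annotations).
    println(stmtSet.explain())
  }
}
{% endhighlight %}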
......@@ -18,11 +18,10 @@
package org.apache.flink.table.planner.calcite
-import org.apache.flink.table.planner.calcite.FlinkRelFactories.{ExpandFactory, RankFactory, SinkFactory}
+import org.apache.flink.table.planner.calcite.FlinkRelFactories.{ExpandFactory, RankFactory}
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.runtime.operators.rank.{RankRange, RankType}
-import org.apache.flink.table.sinks.TableSink
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptTable.ToRelContext
......@@ -60,7 +59,6 @@ object FlinkLogicalRelFactories {
val FLINK_LOGICAL_TABLE_SCAN_FACTORY = new TableScanFactoryImpl
val FLINK_LOGICAL_EXPAND_FACTORY = new ExpandFactoryImpl
val FLINK_LOGICAL_RANK_FACTORY = new RankFactoryImpl
-val FLINK_LOGICAL_SINK_FACTORY = new SinkFactoryImpl
/** A [[RelBuilderFactory]] that creates a [[RelBuilder]] that will
* create logical relational expressions for everything. */
......@@ -75,8 +73,7 @@ object FlinkLogicalRelFactories {
FLINK_LOGICAL_VALUES_FACTORY,
FLINK_LOGICAL_TABLE_SCAN_FACTORY,
FLINK_LOGICAL_EXPAND_FACTORY,
-FLINK_LOGICAL_RANK_FACTORY,
-FLINK_LOGICAL_SINK_FACTORY))
+FLINK_LOGICAL_RANK_FACTORY))
/**
* Implementation of [[ProjectFactory]] that returns a [[FlinkLogicalCalc]].
......@@ -261,17 +258,4 @@ object FlinkLogicalRelFactories {
rankNumberType, outputRankNumber)
}
}
-/**
-* Implementation of [[FlinkRelFactories.SinkFactory]] that returns a [[FlinkLogicalSink]].
-*/
-class SinkFactoryImpl extends SinkFactory {
-def createSink(
-input: RelNode,
-sink: TableSink[_],
-sinkName: String): RelNode = {
-FlinkLogicalSink.create(input, sink, sinkName)
-}
-}
}
......@@ -20,14 +20,13 @@ package org.apache.flink.table.planner.calcite
import org.apache.flink.table.operations.QueryOperation
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
-import org.apache.flink.table.planner.calcite.FlinkRelFactories.{ExpandFactory, RankFactory, SinkFactory}
+import org.apache.flink.table.planner.calcite.FlinkRelFactories.{ExpandFactory, RankFactory}
import org.apache.flink.table.planner.expressions.{PlannerWindowProperty, WindowProperty}
import org.apache.flink.table.planner.plan.QueryOperationConverter
import org.apache.flink.table.planner.plan.logical.LogicalWindow
import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalTableAggregate, LogicalWatermarkAssigner, LogicalWindowAggregate, LogicalWindowTableAggregate}
import org.apache.flink.table.planner.plan.utils.AggregateUtil
import org.apache.flink.table.runtime.operators.rank.{RankRange, RankType}
-import org.apache.flink.table.sinks.TableSink
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelCollation
......@@ -71,10 +70,6 @@ class FlinkRelBuilder(
Util.first(context.unwrap(classOf[RankFactory]), FlinkRelFactories.DEFAULT_RANK_FACTORY)
}
-private val sinkFactory: SinkFactory = {
-Util.first(context.unwrap(classOf[SinkFactory]), FlinkRelFactories.DEFAULT_SINK_FACTORY)
-}
override def getRelOptSchema: RelOptSchema = relOptSchema
override def getCluster: RelOptCluster = relOptCluster
......@@ -91,12 +86,6 @@ class FlinkRelBuilder(
push(expand)
}
-def sink(sink: TableSink[_], sinkName: String): RelBuilder = {
-val input = build()
-val sinkNode = sinkFactory.createSink(input, sink, sinkName)
-push(sinkNode)
-}
def rank(
partitionKey: ImmutableBitSet,
orderKey: RelCollation,
......
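The deleted `sink(...)` method followed Calcite's `RelBuilder` stack discipline: `build()` pops the finished input subtree, the factory wraps it in a sink node, and `push(...)` makes the wrapper the new top of the stack. A self-contained sketch of that discipline, with toy node types standing in for Calcite's classes:

{% highlight scala %}
sealed trait Node
final case class Scan(table: String) extends Node
final case class SinkNode(input: Node, sinkName: String) extends Node

final class MiniRelBuilder {
  private var stack: List[Node] = Nil

  def push(node: Node): MiniRelBuilder = { stack = node :: stack; this }

  // Pops the current subtree, as RelBuilder#build does.
  def build(): Node = stack match {
    case top :: rest => stack = rest; top
    case Nil => throw new IllegalStateException("empty builder stack")
  }

  def scan(table: String): MiniRelBuilder = push(Scan(table))

  // Mirrors the shape of the removed FlinkRelBuilder.sink().
  def sink(sinkName: String): MiniRelBuilder = push(SinkNode(build(), sinkName))
}

// Usage: new MiniRelBuilder().scan("MyTable").sink("MySink").build()
//        returns SinkNode(Scan("MyTable"), "MySink").
{% endhighlight %}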
......@@ -18,9 +18,8 @@
package org.apache.flink.table.planner.calcite
-import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalExpand, LogicalRank, LogicalSink}
+import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalExpand, LogicalRank}
import org.apache.flink.table.runtime.operators.rank.{RankRange, RankType}
-import org.apache.flink.table.sinks.TableSink
import org.apache.calcite.plan.Contexts
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeField}
......@@ -56,8 +55,6 @@ object FlinkRelFactories {
val DEFAULT_RANK_FACTORY = new RankFactoryImpl
-val DEFAULT_SINK_FACTORY = new SinkFactoryImpl
/**
* Can create a [[LogicalExpand]] of the
* appropriate type for this rule's calling convention.
......@@ -112,27 +109,4 @@ object FlinkRelFactories {
rankNumberType, outputRankNumber)
}
}
-/**
-* Can create a [[LogicalSink]] of the
-* appropriate type for this rule's calling convention.
-*/
-trait SinkFactory {
-def createSink(
-input: RelNode,
-sink: TableSink[_],
-sinkName: String): RelNode
-}
-/**
-* Implementation of [[SinkFactory]] that returns a [[LogicalSink]].
-*/
-class SinkFactoryImpl extends SinkFactory {
-def createSink(
-input: RelNode,
-sink: TableSink[_],
-sinkName: String): RelNode = LogicalSink.create(input, sink, sinkName)
-}
}
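The factories in this object are resolved from the planner context with a default fallback (`Util.first(context.unwrap(classOf[...]), DEFAULT_...)`); the commit simply drops the sink entry from that scheme. A toy model of the lookup, with a hypothetical `MiniContext` standing in for Calcite's `Context`:

{% highlight scala %}
trait ExpandFactory { def createExpand(input: String): String }

object DefaultExpandFactory extends ExpandFactory {
  def createExpand(input: String): String = s"Expand($input)"
}

// Hypothetical stand-in for org.apache.calcite.plan.Context#unwrap.
final class MiniContext(registered: Map[Class[_], AnyRef]) {
  def unwrap[T](clazz: Class[T]): Option[T] =
    registered.get(clazz).map(clazz.cast)
}

object FactoryLookup {
  // Same shape as Util.first(context.unwrap(classOf[ExpandFactory]), DEFAULT):
  // prefer a context-provided factory, fall back to the default instance.
  def expandFactoryOf(ctx: MiniContext): ExpandFactory =
    ctx.unwrap(classOf[ExpandFactory]).getOrElse(DefaultExpandFactory)
}
{% endhighlight %}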
......@@ -163,7 +163,7 @@ class RelTimeIndicatorConverter(rexBuilder: RexBuilder) extends RelShuttle {
val input = snapshot.getInput.accept(this)
snapshot.copy(snapshot.getTraitSet, input, snapshot.getPeriod)
-case sink: LogicalSink =>
+case sink: LogicalLegacySink =>
var newInput = sink.getInput.accept(this)
var needsConversion = false
......@@ -181,7 +181,7 @@ class RelTimeIndicatorConverter(rexBuilder: RexBuilder) extends RelShuttle {
if (needsConversion) {
newInput = LogicalProject.create(newInput, projects, newInput.getRowType.getFieldNames)
}
-new LogicalSink(
+new LogicalLegacySink(
sink.getCluster,
sink.getTraitSet,
newInput,
......@@ -482,7 +482,7 @@ object RelTimeIndicatorConverter {
val convertedRoot = rootRel.accept(converter)
// the LogicalSink is converted in RelTimeIndicatorConverter before
-if (rootRel.isInstanceOf[LogicalSink] || !needFinalTimeIndicatorConversion) {
+if (rootRel.isInstanceOf[LogicalLegacySink] || !needFinalTimeIndicatorConversion) {
return convertedRoot
}
var needsConversion = false
......
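The converter's shape is unchanged by the rename: visit the sink's input first, and only if some output field still carries a time-indicator type, insert a projection that materializes it before it reaches the sink. A simplified, self-contained model of that decision (toy row types, not Calcite's `RelDataType`):

{% highlight scala %}
// A field is either a plain column or a time indicator that must be
// materialized into a regular TIMESTAMP before it is written to a sink.
final case class Field(name: String, isTimeIndicator: Boolean)

sealed trait MiniRel { def fields: List[Field] }
final case class MiniScan(fields: List[Field]) extends MiniRel
final case class MiniProject(input: MiniRel, fields: List[Field]) extends MiniRel
final case class MiniSink(input: MiniRel, name: String)

object TimeIndicatorSketch {
  def convertSink(sink: MiniSink): MiniSink = {
    val newInput = sink.input // real converter recurses: sink.getInput.accept(this)
    val needsConversion = newInput.fields.exists(_.isTimeIndicator)
    val finalInput =
      if (needsConversion)
        // Materialize every time indicator via an extra projection.
        MiniProject(newInput, newInput.fields.map(_.copy(isTimeIndicator = false)))
      else newInput
    sink.copy(input = finalInput)
  }
}
{% endhighlight %}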
......@@ -33,7 +33,7 @@ import org.apache.flink.table.planner.calcite.{CalciteParser, FlinkPlannerImpl,
import org.apache.flink.table.planner.catalog.CatalogManagerCalciteSchema
import org.apache.flink.table.planner.expressions.PlannerTypeInferenceUtilImpl
import org.apache.flink.table.planner.hint.FlinkHints
-import org.apache.flink.table.planner.plan.nodes.calcite.LogicalSink
+import org.apache.flink.table.planner.plan.nodes.calcite.LogicalLegacySink
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode
import org.apache.flink.table.planner.plan.nodes.physical.FlinkPhysicalRel
import org.apache.flink.table.planner.plan.optimize.Optimizer
......@@ -178,7 +178,7 @@ abstract class PlannerBase(
val sinkSchema = s.getSink.getTableSchema
// validate query schema and sink schema, and apply cast if possible
val query = validateSchemaAndApplyImplicitCast(input, sinkSchema, getTypeFactory)
-LogicalSink.create(
+LogicalLegacySink.create(
query,
s.getSink,
"UnregisteredSink",
......@@ -201,7 +201,7 @@ abstract class PlannerBase(
TableSchemaUtils.getPhysicalSchema(table.getSchema),
getTypeFactory,
Some(catalogSink.getTableIdentifier.asSummaryString()))
-LogicalSink.create(
+LogicalLegacySink.create(
query,
sink,
identifier.toString,
......@@ -233,7 +233,7 @@ abstract class PlannerBase(
typeInfo,
needUpdateBefore,
withChangeFlag)
-LogicalSink.create(
+LogicalLegacySink.create(
query,
tableSink,
"DataStreamTableSink",
......
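All three branches above funnel through the same guard: the query schema is validated against the sink schema, with implicit casts applied where legal, before `LogicalLegacySink.create` is called. A sketch of that check under simple assumptions (string type names and a toy castability relation; the real `validateSchemaAndApplyImplicitCast` operates on Flink's `TableSchema` and logical types):

{% highlight scala %}
final case class Col(name: String, tpe: String)

object SchemaValidation {
  // Toy implicit-cast relation: exact match, or widening INT -> BIGINT.
  def castable(from: String, to: String): Boolean =
    from == to || (from == "INT" && to == "BIGINT")

  def validateAndCast(query: List[Col], sink: List[Col]): List[Col] = {
    require(query.length == sink.length,
      s"field count mismatch: query=${query.length}, sink=${sink.length}")
    query.zip(sink).map { case (q, s) =>
      if (q.tpe == s.tpe) q
      else if (castable(q.tpe, s.tpe)) q.copy(tpe = s.tpe) // apply implicit cast
      else throw new IllegalArgumentException(
        s"field ${q.name}: ${q.tpe} cannot be cast to ${s.tpe}")
    }
  }
}
{% endhighlight %}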
......@@ -35,7 +35,7 @@ import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
* @param sink Table sink to write into
* @param sinkName Name of the table sink; not a required property, i.e. it may be null
*/
-abstract class Sink(
+abstract class LegacySink(
cluster: RelOptCluster,
traitSet: RelTraitSet,
input: RelNode,
......
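This abstract class is the root of the renamed hierarchy; the rest of the commit updates its subclasses and every reference to them. The post-commit shape, sketched with parameters and mix-ins elided:

{% highlight scala %}
// Post-commit shape of the hierarchy (constructor parameters and the
// FlinkLogicalRel / ExecNode mix-ins elided).
abstract class LegacySink                            // nodes/calcite base class
final class LogicalLegacySink extends LegacySink     // Calcite logical rel
class FlinkLogicalLegacySink extends LegacySink      // FlinkConventions.LOGICAL
class BatchExecLegacySink[T] extends LegacySink      // batch physical node
class StreamExecLegacySink[T] extends LegacySink     // stream physical node
{% endhighlight %}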
......@@ -29,11 +29,11 @@ import java.util
import scala.collection.JavaConversions._
/**
-* Sub-class of [[Sink]] that is a relational expression
+* Sub-class of [[LegacySink]] that is a relational expression
* which writes out data of input node into a [[TableSink]].
* This class corresponds to Calcite logical rel.
*/
-final class LogicalSink(
+final class LogicalLegacySink(
cluster: RelOptCluster,
traitSet: RelTraitSet,
input: RelNode,
......@@ -41,24 +41,24 @@ final class LogicalSink(
sinkName: String,
val catalogTable: CatalogTable,
val staticPartitions: Map[String, String])
-extends Sink(cluster, traitSet, input, sink, sinkName) {
+extends LegacySink(cluster, traitSet, input, sink, sinkName) {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
-new LogicalSink(
+new LogicalLegacySink(
cluster, traitSet, inputs.head, sink, sinkName, catalogTable, staticPartitions)
}
}
-object LogicalSink {
+object LogicalLegacySink {
def create(input: RelNode,
sink: TableSink[_],
sinkName: String,
catalogTable: CatalogTable = null,
-staticPartitions: Map[String, String] = Map()): LogicalSink = {
+staticPartitions: Map[String, String] = Map()): LogicalLegacySink = {
val traits = input.getCluster.traitSetOf(Convention.NONE)
-new LogicalSink(
+new LogicalLegacySink(
input.getCluster, traits, input, sink, sinkName, catalogTable, staticPartitions)
}
}
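`LogicalLegacySink.create` keeps the companion-object factory pattern: `catalogTable` and `staticPartitions` are defaulted so older call sites that know nothing about partitions compile unchanged. The pattern in isolation, with hypothetical names:

{% highlight scala %}
final class MiniLegacySink(
    val input: String,
    val sinkName: String,
    val staticPartitions: Map[String, String])

object MiniLegacySink {
  // Defaulted trailing parameters keep pre-partitioning call sites compiling.
  def create(
      input: String,
      sinkName: String,
      staticPartitions: Map[String, String] = Map()): MiniLegacySink =
    new MiniLegacySink(input, sinkName, staticPartitions)
}

// Both call styles work:
//   MiniLegacySink.create("scan", "MySink")
//   MiniLegacySink.create("scan", "MySink", Map("dt" -> "2020-04-09"))
{% endhighlight %}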
......@@ -20,7 +20,7 @@ package org.apache.flink.table.planner.plan.nodes.logical
import org.apache.flink.table.catalog.CatalogTable
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
-import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalSink, Sink}
+import org.apache.flink.table.planner.plan.nodes.calcite.{LogicalLegacySink, LegacySink}
import org.apache.flink.table.sinks.TableSink
import org.apache.calcite.plan.{Convention, RelOptCluster, RelOptRule, RelTraitSet}
......@@ -32,10 +32,10 @@ import java.util
import scala.collection.JavaConversions._
/**
-* Sub-class of [[Sink]] that is a relational expression
+* Sub-class of [[LegacySink]] that is a relational expression
* which writes out data of input node into a [[TableSink]].
*/
-class FlinkLogicalSink(
+class FlinkLogicalLegacySink(
cluster: RelOptCluster,
traitSet: RelTraitSet,
input: RelNode,
......@@ -43,27 +43,27 @@ class FlinkLogicalSink(
sinkName: String,
val catalogTable: CatalogTable,
val staticPartitions: Map[String, String])
-extends Sink(cluster, traitSet, input, sink, sinkName)
-with FlinkLogicalRel {
+extends LegacySink(cluster, traitSet, input, sink, sinkName)
+with FlinkLogicalRel {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
-new FlinkLogicalSink(
+new FlinkLogicalLegacySink(
cluster, traitSet, inputs.head, sink, sinkName, catalogTable, staticPartitions)
}
}
-private class FlinkLogicalSinkConverter
+private class FlinkLogicalLegacySinkConverter
extends ConverterRule(
-classOf[LogicalSink],
+classOf[LogicalLegacySink],
Convention.NONE,
FlinkConventions.LOGICAL,
-"FlinkLogicalSinkConverter") {
+"FlinkLogicalLegacySinkConverter") {
override def convert(rel: RelNode): RelNode = {
-val sink = rel.asInstanceOf[LogicalSink]
+val sink = rel.asInstanceOf[LogicalLegacySink]
val newInput = RelOptRule.convert(sink.getInput, FlinkConventions.LOGICAL)
-FlinkLogicalSink.create(
+FlinkLogicalLegacySink.create(
newInput,
sink.sink,
sink.sinkName,
......@@ -72,18 +72,18 @@ private class FlinkLogicalSinkConverter
}
}
-object FlinkLogicalSink {
-val CONVERTER: ConverterRule = new FlinkLogicalSinkConverter()
+object FlinkLogicalLegacySink {
+val CONVERTER: ConverterRule = new FlinkLogicalLegacySinkConverter()
def create(
input: RelNode,
sink: TableSink[_],
sinkName: String,
catalogTable: CatalogTable = null,
-staticPartitions: Map[String, String] = Map()): FlinkLogicalSink = {
+staticPartitions: Map[String, String] = Map()): FlinkLogicalLegacySink = {
val cluster = input.getCluster
val traitSet = cluster.traitSetOf(FlinkConventions.LOGICAL).simplify()
-new FlinkLogicalSink(
+new FlinkLogicalLegacySink(
cluster, traitSet, input, sink, sinkName, catalogTable, staticPartitions)
}
}
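`FlinkLogicalLegacySinkConverter` is a textbook Calcite `ConverterRule`: match the node in `Convention.NONE`, convert its child into the target convention, and re-create the node there. A self-contained toy of that convert step, with strings standing in for conventions:

{% highlight scala %}
sealed trait TRel { def convention: String }
final case class TScan(convention: String) extends TRel
final case class TSink(input: TRel, convention: String) extends TRel

object ConverterSketch {
  // Stand-in for RelOptRule.convert(input, targetConvention).
  def convertTo(rel: TRel, target: String): TRel = rel match {
    case s: TScan => s.copy(convention = target)
    case s: TSink => TSink(convertTo(s.input, target), target)
  }

  // The rule body: given a NONE-convention sink, rebuild it in LOGICAL with
  // a converted input, the same shape as FlinkLogicalLegacySinkConverter.convert.
  def convertSinkRule(sink: TSink): TSink =
    TSink(convertTo(sink.input, "LOGICAL"), "LOGICAL")
}
{% endhighlight %}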
......@@ -26,7 +26,7 @@ import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.codegen.SinkCodeGenerator._
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext}
import org.apache.flink.table.planner.delegation.BatchPlanner
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.nodes.exec.{BatchExecNode, ExecNode}
import org.apache.flink.table.planner.plan.utils.UpdatingPlanChecker
import org.apache.flink.table.planner.sinks.DataStreamTableSink
......@@ -46,18 +46,18 @@ import scala.collection.JavaConversions._
/**
* Batch physical RelNode to write data into an external sink defined by a [[TableSink]].
*/
-class BatchExecSink[T](
+class BatchExecLegacySink[T](
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
sink: TableSink[T],
sinkName: String)
-extends Sink(cluster, traitSet, inputRel, sink, sinkName)
-with BatchPhysicalRel
-with BatchExecNode[Any] {
+extends LegacySink(cluster, traitSet, inputRel, sink, sinkName)
+with BatchPhysicalRel
+with BatchExecNode[Any] {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
-new BatchExecSink(cluster, traitSet, inputs.get(0), sink, sinkName)
+new BatchExecLegacySink(cluster, traitSet, inputs.get(0), sink, sinkName)
}
//~ ExecNode methods -----------------------------------------------------------
......
......@@ -27,7 +27,7 @@ import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.SinkCodeGenerator.generateRowConverterOperator
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext}
import org.apache.flink.table.planner.delegation.StreamPlanner
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, StreamExecNode}
import org.apache.flink.table.planner.plan.utils.{ChangelogPlanUtils, UpdatingPlanChecker}
import org.apache.flink.table.planner.sinks.DataStreamTableSink
......@@ -45,20 +45,20 @@ import scala.collection.JavaConversions._
/**
* Stream physical RelNode to write data into an external sink defined by a [[TableSink]].
*/
-class StreamExecSink[T](
+class StreamExecLegacySink[T](
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
sink: TableSink[T],
sinkName: String)
-extends Sink(cluster, traitSet, inputRel, sink, sinkName)
-with StreamPhysicalRel
-with StreamExecNode[Any] {
+extends LegacySink(cluster, traitSet, inputRel, sink, sinkName)
+with StreamPhysicalRel
+with StreamExecNode[Any] {
override def requireWatermark: Boolean = false
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
-new StreamExecSink(cluster, traitSet, inputs.get(0), sink, sinkName)
+new StreamExecLegacySink(cluster, traitSet, inputs.get(0), sink, sinkName)
}
//~ ExecNode methods -----------------------------------------------------------
......
......@@ -22,7 +22,7 @@ import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.catalog.{CatalogManager, FunctionCatalog}
import org.apache.flink.table.planner.calcite.{FlinkContext, SqlExprToRexConverterFactory}
import org.apache.flink.table.planner.delegation.BatchPlanner
-import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchExecSink
+import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchExecLegacySink
import org.apache.flink.table.planner.plan.optimize.program.{BatchOptimizeContext, FlinkBatchProgram}
import org.apache.flink.table.planner.plan.schema.IntermediateRelTable
import org.apache.flink.table.planner.utils.TableConfigUtils
......@@ -57,7 +57,7 @@ class BatchCommonSubGraphBasedOptimizer(planner: BatchPlanner)
val optimizedTree = optimizeTree(originTree)
optimizedTree match {
-case _: BatchExecSink[_] => // ignore
+case _: BatchExecLegacySink[_] => // ignore
case _ =>
val name = createUniqueIntermediateRelTableName
val intermediateRelTable = new IntermediateRelTable(Collections.singletonList(name),
......
......@@ -23,7 +23,7 @@ import org.apache.flink.configuration.ConfigOption
import org.apache.flink.configuration.ConfigOptions.key
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.planner.plan.`trait`.MiniBatchInterval
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.reuse.SubplanReuser.{SubplanReuseContext, SubplanReuseShuttle}
import org.apache.flink.table.planner.plan.rules.logical.WindowPropertiesRules
import org.apache.flink.table.planner.plan.utils.{DefaultRelShuttle, ExpandTableScanShuttle}
......@@ -43,7 +43,7 @@ import scala.collection.mutable
/**
* A [[RelNodeBlock]] is a sub-tree in the [[RelNode]] DAG, and represents a common sub-graph
* in [[CommonSubGraphBasedOptimizer]]. All [[RelNode]]s in each block have
-* only one [[Sink]] output.
+* only one [[LegacySink]] output.
*
* The algorithm works as follows:
* 1. If there is only one tree, the whole tree is in one block. (the next steps are unnecessary.)
......@@ -91,7 +91,7 @@ import scala.collection.mutable
* }}}
*
* This [[RelNode]] DAG will be decomposed into three [[RelNodeBlock]]s, the break-point
-* is the [[RelNode]](`Join(a1=b2)`) which data outputs to multiple [[Sink]]s.
+* is the [[RelNode]](`Join(a1=b2)`) which data outputs to multiple [[LegacySink]]s.
* is the [[RelNode]](`Join(a1=b2)`) which data outputs to multiple [[LegacySink]]s.
* <p>Notes: Although `Project(a,b,c)` has two parents (outputs),
* they eventually merge at `Join(a1=b2)`. So `Project(a,b,c)` is not a break-point.
* <p>the first [[RelNodeBlock]] includes TableScan, Project(a,b,c), Filter(a>0),
......@@ -111,8 +111,8 @@ import scala.collection.mutable
* will be wrapped as an IntermediateRelTable first, and then be converted to a new TableScan
* which is the new output node of current block and is also the input of its parent blocks.
*
-* @param outputNode A RelNode of the output in the block, which could be a [[Sink]] or
-* other RelNode which data outputs to multiple [[Sink]]s.
+* @param outputNode A RelNode of the output in the block, which could be a [[LegacySink]] or
+* other RelNode which data outputs to multiple [[LegacySink]]s.
*/
class RelNodeBlock(val outputNode: RelNode) {
// child (or input) blocks
......
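As a rough illustration of the decomposition described above, here is a deliberately simplified break-point finder: expand the DAG from every sink and treat any node with more than one parent as a block boundary. The refinement in the notes, that parents which re-merge before the sinks do not split a block, is omitted here:

{% highlight scala %}
final class DagNode(val name: String, val inputs: List[DagNode])

object BlockSketch {
  def breakPoints(sinks: List[DagNode]): Set[DagNode] = {
    import scala.collection.mutable
    val parentCount = mutable.Map.empty[DagNode, Int].withDefaultValue(0)
    def expand(node: DagNode): Unit = node.inputs.foreach { child =>
      parentCount(child) += 1
      if (parentCount(child) == 1) expand(child) // count every edge, walk each node once
    }
    sinks.foreach(expand)
    parentCount.collect { case (n, c) if c > 1 => n }.toSet
  }
}
{% endhighlight %}

Each break-point then becomes the output node of one block (wrapped as an IntermediateRelTable), and every sink roots a block of its own.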
......@@ -25,7 +25,7 @@ import org.apache.flink.table.planner.calcite.{FlinkContext, SqlExprToRexConvert
import org.apache.flink.table.planner.delegation.StreamPlanner
import org.apache.flink.table.planner.plan.`trait`.{MiniBatchInterval, MiniBatchIntervalTrait, MiniBatchIntervalTraitDef, MiniBatchMode, ModifyKindSet, ModifyKindSetTraitDef, UpdateKind, UpdateKindTraitDef}
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.nodes.physical.stream.{StreamExecDataStreamScan, StreamExecIntermediateTableScan, StreamPhysicalRel}
import org.apache.flink.table.planner.plan.optimize.program.{FlinkStreamProgram, StreamOptimizeContext}
import org.apache.flink.table.planner.plan.schema.IntermediateRelTable
......@@ -109,7 +109,7 @@ class StreamCommonSubGraphBasedOptimizer(planner: StreamPlanner)
val blockLogicalPlan = block.getPlan
blockLogicalPlan match {
-case s: Sink =>
+case s: LegacySink =>
require(isSinkBlock)
val optimizedTree = optimizeTree(
s,
......@@ -210,7 +210,7 @@ class StreamCommonSubGraphBasedOptimizer(planner: StreamPlanner)
val blockLogicalPlan = block.getPlan
blockLogicalPlan match {
-case n: Sink =>
+case n: LegacySink =>
require(isSinkBlock)
val optimizedPlan = optimizeTree(
n, updateBeforeRequired, miniBatchInterval, isSinkBlock = true)
......
......@@ -112,7 +112,7 @@ class FlinkChangelogModeInferenceProgram extends FlinkOptimizeProgram[StreamOpti
rel: StreamPhysicalRel,
requiredTrait: ModifyKindSetTrait,
requester: String): StreamPhysicalRel = rel match {
-case sink: StreamExecSink[_] =>
+case sink: StreamExecLegacySink[_] =>
val (sinkRequiredTrait, name) = sink.sink match {
case _: UpsertStreamTableSink[_] =>
(ModifyKindSetTrait.ALL_CHANGES, "UpsertStreamTableSink")
......@@ -370,7 +370,7 @@ class FlinkChangelogModeInferenceProgram extends FlinkOptimizeProgram[StreamOpti
def visit(
rel: StreamPhysicalRel,
requiredTrait: UpdateKindTrait): Option[StreamPhysicalRel] = rel match {
-case sink: StreamExecSink[_] =>
+case sink: StreamExecLegacySink[_] =>
val childModifyKindSet = getModifyKindSet(sink.getInput)
val onlyAfter = onlyAfterOrNone(childModifyKindSet)
val beforeAndAfter = beforeAfterOrNone(childModifyKindSet)
......
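The first match arm above derives the changelog requirement from the sink's interface, e.g. an `UpsertStreamTableSink` must tolerate all change kinds. A reduced sketch of that dispatch with toy sink and change-kind types (the real code works on `ModifyKindSetTrait`, and the upsert/retract distinction is resolved later by the `UpdateKind` pass):

{% highlight scala %}
sealed trait ChangeKind
case object Insert extends ChangeKind
case object UpdateBefore extends ChangeKind
case object UpdateAfter extends ChangeKind
case object Delete extends ChangeKind

sealed trait MiniSink
case object AppendSink extends MiniSink
case object UpsertSink extends MiniSink
case object RetractSink extends MiniSink

object ChangelogSketch {
  // Which change kinds the sink's input must be able to produce.
  def requiredKinds(sink: MiniSink): Set[ChangeKind] = sink match {
    case AppendSink => Set(Insert) // append-only sinks take inserts only
    // Upsert and retract sinks both tolerate every change kind at this
    // phase; they differ later, in the UpdateKind (UB/UA) inference pass.
    case UpsertSink | RetractSink =>
      Set(Insert, UpdateBefore, UpdateAfter, Delete)
  }
}
{% endhighlight %}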
......@@ -20,7 +20,7 @@ package org.apache.flink.table.planner.plan.reuse
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.api.{TableConfig, TableException}
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalLegacyTableSourceScan
import org.apache.flink.table.planner.plan.nodes.physical.PhysicalLegacyTableSourceScan
import org.apache.flink.table.planner.plan.utils.{DefaultRelShuttle, FlinkRelOptUtil}
......@@ -157,7 +157,7 @@ object SubplanReuser {
// Exchange node can not be reused if its input is reusable disabled
case e: Exchange => isNodeReusableDisabled(e.getInput)
// TableFunctionScan and sink can not be reused
-case _: TableFunctionScan | _: Sink => true
+case _: TableFunctionScan | _: LegacySink => true
case _ => false
}
}
......
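`isNodeReusableDisabled` feeds a digest-based deduplication: subtrees with identical digests collapse onto one representative, unless the node kind forbids sharing (table function scans and sinks above). A compact sketch of that mechanism with toy nodes:

{% highlight scala %}
final case class PlanNode(kind: String, digest: String)

object ReuseSketch {
  // Node kinds that must never be shared, mirroring the match above.
  def reuseDisabled(node: PlanNode): Boolean =
    node.kind == "LegacySink" || node.kind == "TableFunctionScan"

  // Map each node onto the first occurrence of its digest; non-reusable
  // nodes always stay themselves.
  def dedup(nodes: List[PlanNode]): List[PlanNode] = {
    val seen = scala.collection.mutable.Map.empty[String, PlanNode]
    nodes.map(n => if (reuseDisabled(n)) n else seen.getOrElseUpdate(n.digest, n))
  }
}
{% endhighlight %}

In the optimized plans above, this is what produces the `reuse_id=[1]` / `Reused(reference_id=[1])` pairs.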
......@@ -335,7 +335,7 @@ object FlinkBatchRuleSets {
FlinkLogicalRank.CONVERTER,
FlinkLogicalWindowAggregate.CONVERTER,
FlinkLogicalSnapshot.CONVERTER,
-FlinkLogicalSink.CONVERTER
+FlinkLogicalLegacySink.CONVERTER
)
/**
......@@ -423,7 +423,7 @@ object FlinkBatchRuleSets {
BatchExecCorrelateRule.INSTANCE,
BatchExecPythonCorrelateRule.INSTANCE,
// sink
-BatchExecSinkRule.INSTANCE
+BatchExecLegacySinkRule.INSTANCE
)
/**
......
......@@ -310,7 +310,7 @@ object FlinkStreamRuleSets {
FlinkLogicalWindowTableAggregate.CONVERTER,
FlinkLogicalSnapshot.CONVERTER,
FlinkLogicalMatch.CONVERTER,
-FlinkLogicalSink.CONVERTER
+FlinkLogicalLegacySink.CONVERTER
)
/**
......@@ -406,7 +406,7 @@ object FlinkStreamRuleSets {
StreamExecCorrelateRule.INSTANCE,
StreamExecPythonCorrelateRule.INSTANCE,
// sink
-StreamExecSinkRule.INSTANCE
+StreamExecLegacySinkRule.INSTANCE
)
/**
......
......@@ -22,8 +22,8 @@ import org.apache.flink.table.api.TableException
import org.apache.flink.table.filesystem.FileSystemTableFactory
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
-import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalSink
-import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchExecSink
+import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalLegacySink
+import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchExecLegacySink
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil
import org.apache.flink.table.sinks.PartitionableTableSink
......@@ -33,14 +33,14 @@ import org.apache.calcite.rel.{RelCollations, RelNode}
import scala.collection.JavaConversions._
-class BatchExecSinkRule extends ConverterRule(
-classOf[FlinkLogicalSink],
+class BatchExecLegacySinkRule extends ConverterRule(
+classOf[FlinkLogicalLegacySink],
FlinkConventions.LOGICAL,
FlinkConventions.BATCH_PHYSICAL,
-"BatchExecSinkRule") {
+"BatchExecLegacySinkRule") {
def convert(rel: RelNode): RelNode = {
-val sinkNode = rel.asInstanceOf[FlinkLogicalSink]
+val sinkNode = rel.asInstanceOf[FlinkLogicalLegacySink]
val newTrait = rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
var requiredTraitSet = sinkNode.getInput.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
if (sinkNode.catalogTable != null && sinkNode.catalogTable.isPartitioned) {
......@@ -78,7 +78,7 @@ class BatchExecSinkRule extends ConverterRule(
val newInput = RelOptRule.convert(sinkNode.getInput, requiredTraitSet)
-new BatchExecSink(
+new BatchExecLegacySink(
rel.getCluster,
newTrait,
newInput,
......@@ -87,8 +87,8 @@ class BatchExecSinkRule extends ConverterRule(
}
}
-object BatchExecSinkRule {
+object BatchExecLegacySinkRule {
-val INSTANCE: RelOptRule = new BatchExecSinkRule
+val INSTANCE: RelOptRule = new BatchExecLegacySinkRule
}
......@@ -22,8 +22,8 @@ import org.apache.flink.table.api.TableException
import org.apache.flink.table.filesystem.FileSystemTableFactory
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
-import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalSink
-import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamExecSink
+import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalLegacySink
+import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamExecLegacySink
import org.apache.flink.table.sinks.PartitionableTableSink
import org.apache.calcite.plan.RelOptRule
......@@ -32,14 +32,14 @@ import org.apache.calcite.rel.convert.ConverterRule
import scala.collection.JavaConversions._
-class StreamExecSinkRule extends ConverterRule(
-classOf[FlinkLogicalSink],
+class StreamExecLegacySinkRule extends ConverterRule(
+classOf[FlinkLogicalLegacySink],
FlinkConventions.LOGICAL,
FlinkConventions.STREAM_PHYSICAL,
-"StreamExecSinkRule") {
+"StreamExecLegacySinkRule") {
def convert(rel: RelNode): RelNode = {
-val sinkNode = rel.asInstanceOf[FlinkLogicalSink]
+val sinkNode = rel.asInstanceOf[FlinkLogicalLegacySink]
val newTrait = rel.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
var requiredTraitSet = sinkNode.getInput.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
if (sinkNode.catalogTable != null && sinkNode.catalogTable.isPartitioned) {
......@@ -75,7 +75,7 @@ class StreamExecSinkRule extends ConverterRule(
val newInput = RelOptRule.convert(sinkNode.getInput, requiredTraitSet)
-new StreamExecSink(
+new StreamExecLegacySink(
rel.getCluster,
newTrait,
newInput,
......@@ -84,8 +84,8 @@ class StreamExecSinkRule extends ConverterRule(
}
}
-object StreamExecSinkRule {
+object StreamExecLegacySinkRule {
-val INSTANCE: RelOptRule = new StreamExecSinkRule
+val INSTANCE: RelOptRule = new StreamExecLegacySinkRule
}
......@@ -17,7 +17,7 @@
*/
package org.apache.flink.table.planner.plan.utils
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, ExecNodeVisitorImpl}
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalRel
......@@ -116,7 +116,7 @@ object ExecNodePlanDumper {
}
val reuseId = reuseInfoBuilder.getReuseId(node)
val isReuseNode = reuseId.isDefined
-if (node.isInstanceOf[Sink] || (isReuseNode && !reuseInfoMap.containsKey(node))) {
+if (node.isInstanceOf[LegacySink] || (isReuseNode && !reuseInfoMap.containsKey(node))) {
if (isReuseNode) {
reuseInfoMap.put(node, (reuseId.get, true))
}
......
......@@ -19,7 +19,7 @@ package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.planner.delegation.PlannerBase
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
-import org.apache.flink.table.planner.plan.nodes.calcite.Sink
+import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.sinks.UpsertStreamTableSink
import scala.collection.JavaConversions._
......@@ -27,7 +27,7 @@ import scala.collection.JavaConversions._
object UpdatingPlanChecker {
def getUniqueKeyForUpsertSink(
-sinkNode: Sink,
+sinkNode: LegacySink,
planner: PlannerBase,
sink: UpsertStreamTableSink[_]): Option[Array[String]] = {
// extract unique key fields
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+- LogicalProject(d=[$0], e=[$1])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CollectionTableSource(a, b, c)]]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+- Calc(select=[a, b], where=[>(a, 10)])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CollectionTableSource(a, b, c)]]], fields=[a, b, c])
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+- LogicalProject(a=[$0], b=[$1])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CollectionTableSource(a, b, c)]]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[d, e])
+- Calc(select=[a, b], where=[>(a, 10)])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CollectionTableSource(a, b, c)]]], fields=[a, b, c])
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- LogicalProject(first=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- Calc(select=[first])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable]], fields=[first, id, score, last])
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- LogicalProject(first=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first, id, score, last)]]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first)]]], fields=[first])
== Physical Execution Plan ==
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- LogicalProject(first=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first, id, score, last)]]])
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink2`], fields=[last])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink2`], fields=[last])
+- LogicalProject(last=[$3])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first, id, score, last)]]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink1`], fields=[first])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first)]]], fields=[first])
-Sink(name=[`default_catalog`.`default_database`.`MySink2`], fields=[last])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink2`], fields=[last])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: last)]]], fields=[last])
== Physical Execution Plan ==
......
== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`MySink`], fields=[first])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[first])
+- LogicalProject(first=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first, id, score, last)]]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`MySink`], fields=[first])
+LegacySink(name=[`default_catalog`.`default_database`.`MySink`], fields=[first])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [CsvTableSource(read fields: first)]]], fields=[first])
== Physical Execution Plan ==
......
......@@ -283,14 +283,14 @@ Calc(select=[a, b, c, e, f]): rowcount = , cumulative cost = {rows, cpu, io, net
<TestCase name="testExplainWithMultiSinks[extended=false]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[>($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
-LogicalSink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -304,11 +304,11 @@ HashAggregate(isMerge=[true], groupBy=[a], select=[a, Final_COUNT(count1$0) AS c
+- Calc(select=[a])
+- BoundedStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c])
-Sink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+LegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+- Calc(select=[a, cnt], where=[>(cnt, 10)])
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+LegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+- Calc(select=[a, cnt], where=[<(cnt, 10)])
+- Reused(reference_id=[1])
......@@ -362,14 +362,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
<TestCase name="testExplainWithMultiSinks[extended=true]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[>($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
-LogicalSink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -383,11 +383,11 @@ HashAggregate(isMerge=[true], groupBy=[a], select=[a, Final_COUNT(count1$0) AS c
+- Calc(select=[a]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- BoundedStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
-Sink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[a, cnt]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, cnt], where=[>(cnt, 10)]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, cnt], where=[<(cnt, 10)]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Reused(reference_id=[1])
......@@ -441,13 +441,13 @@ Sink(name=[`default_catalog`.`default_database`.`sink2`], fields=[a, cnt]): rowc
<TestCase name="testExplainWithSingleSink[extended=false]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Calc(select=[a, b, c], where=[>(a, 10)])
+- BoundedStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c])
......@@ -477,13 +477,13 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
<TestCase name="testExplainWithSingleSink[extended=true]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, b, c], where=[>(a, 10)]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- BoundedStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
......
......@@ -345,14 +345,14 @@ Calc(select=[a, b, c, e, f], changelogMode=[I]): rowcount = , cumulative cost =
<TestCase name="testExplainWithMultiSinks[extended=false]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[>($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
-LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -365,11 +365,11 @@ GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], reuse_id=[1])
+- Calc(select=[a])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c])
-Sink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+LegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+- Calc(select=[a, cnt], where=[>(cnt, 10)])
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+LegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+- Calc(select=[a, cnt], where=[<(cnt, 10)])
+- Reused(reference_id=[1])
......@@ -427,14 +427,14 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
<TestCase name="testExplainWithMultiSinks[extended=true]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[>($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
-LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -447,11 +447,11 @@ GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], changelogMode=[I,UB,UA]
+- Calc(select=[a], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- DataStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
-Sink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[a, cnt], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, cnt], where=[>(cnt, 10)], changelogMode=[I,UB,UA]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, cnt], where=[<(cnt, 10)], changelogMode=[I,UB,UA]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Reused(reference_id=[1])
......@@ -509,13 +509,13 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[a, cnt],
<TestCase name="testExplainWithSingleSink[extended=false]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+- Calc(select=[a, b, c], where=[>(a, 10)])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c])
......@@ -545,13 +545,13 @@ Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
<TestCase name="testExplainWithSingleSink[extended=true]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalFilter(condition=[>($0, 10)])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable1]])
== Optimized Logical Plan ==
-Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b, c], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[a, b, c], where=[>(a, 10)], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- DataStreamScan(table=[[default_catalog, default_database, MyTable1]], fields=[a, b, c], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
......@@ -635,7 +635,7 @@ SortLimit(orderBy=[a ASC], offset=[0], fetch=[5], strategy=[AppendFastStrategy],
<TestCase name="testMiniBatchIntervalInfer[extended=true]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+- LogicalProject(id1=[$0], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject(id1=[$0], $f1=[$TUMBLE($2, 8000:INTERVAL SECOND)], text=[$1], $f3=[_UTF-16LE'#'])
......@@ -646,7 +646,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a
+- LogicalWatermarkAssigner(rowtime=[rowtime], watermark=[-($4, 0:INTERVAL MILLISECOND)])
+- LogicalTableScan(table=[[default_catalog, default_database, T2]])
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+- LogicalProject(id1=[$0], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject(id1=[$0], $f1=[HOP($2, 12000:INTERVAL SECOND, 6000:INTERVAL SECOND)], text=[$1], $f3=[_UTF-16LE'*'])
......@@ -666,13 +666,13 @@ WindowJoin(joinType=[InnerJoin], windowBounds=[isRowTime=true, leftLowerBound=-2
+- WatermarkAssigner(rowtime=[rowtime], watermark=[-(rowtime, 0:INTERVAL MILLISECOND)], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- DataStreamScan(table=[[default_catalog, default_database, T2]], fields=[id2, cnt, name, goods, rowtime], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
-Sink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- GroupWindowAggregate(groupBy=[id1], window=[TumblingGroupWindow('w$, rowtime, 8000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Exchange(distribution=[hash[id1]], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[id1, rowtime, text, _UTF-16LE'#' AS $f3], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b], changelogMode=[NONE]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- GroupWindowAggregate(groupBy=[id1], window=[SlidingGroupWindow('w$, rowtime, 6000, 12000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Exchange(distribution=[hash[id1]], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
+- Calc(select=[id1, rowtime, text, _UTF-16LE'*' AS $f3], changelogMode=[I]): rowcount = , cumulative cost = {rows, cpu, io, network, memory}
......@@ -775,7 +775,7 @@ Union(all=[true], union=[a, b, c])
<TestCase name="testMiniBatchIntervalInfer[extended=false]">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+- LogicalProject(id1=[$0], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject(id1=[$0], $f1=[$TUMBLE($2, 8000:INTERVAL SECOND)], text=[$1], $f3=[_UTF-16LE'#'])
......@@ -786,7 +786,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a
+- LogicalWatermarkAssigner(rowtime=[rowtime], watermark=[-($4, 0:INTERVAL MILLISECOND)])
+- LogicalTableScan(table=[[default_catalog, default_database, T2]])
-LogicalSink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+- LogicalProject(id1=[$0], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject(id1=[$0], $f1=[HOP($2, 12000:INTERVAL SECOND, 6000:INTERVAL SECOND)], text=[$1], $f3=[_UTF-16LE'*'])
......@@ -806,13 +806,13 @@ WindowJoin(joinType=[InnerJoin], windowBounds=[isRowTime=true, leftLowerBound=-2
+- WatermarkAssigner(rowtime=[rowtime], watermark=[-(rowtime, 0:INTERVAL MILLISECOND)])
+- DataStreamScan(table=[[default_catalog, default_database, T2]], fields=[id2, cnt, name, goods, rowtime])
-Sink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+- GroupWindowAggregate(groupBy=[id1], window=[TumblingGroupWindow('w$, rowtime, 8000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1])
+- Exchange(distribution=[hash[id1]])
+- Calc(select=[id1, rowtime, text, _UTF-16LE'#' AS $f3])
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+LegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+- GroupWindowAggregate(groupBy=[id1], window=[SlidingGroupWindow('w$, rowtime, 6000, 12000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1])
+- Exchange(distribution=[hash[id1]])
+- Calc(select=[id1, rowtime, text, _UTF-16LE'*' AS $f3])
......
......@@ -19,7 +19,7 @@ limitations under the License.
<TestCase name="testSingleSink">
<Resource name="planBefore">
<![CDATA[
-LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
+- LogicalProject(cnt=[$1])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
......@@ -28,7 +28,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
</Resource>
<Resource name="planAfter">
<![CDATA[
-Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
+LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
+- Calc(select=[cnt])
+- HashAggregate(isMerge=[true], groupBy=[a], select=[a, Final_COUNT(count1$0) AS cnt])
+- Exchange(distribution=[hash[a]])
......@@ -41,14 +41,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a])
<TestCase name="testMultiSinks">
<Resource name="planBefore">
<![CDATA[
-LogicalSink(name=[`default_catalog`.`default_database`.`sink1`], fields=[total_sum])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[total_sum])
+- LogicalAggregate(group=[{}], total_sum=[SUM($0)])
+- LogicalProject(sum_a=[$1])
+- LogicalAggregate(group=[{0}], sum_a=[SUM($1)])
+- LogicalProject(c=[$2], a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
-LogicalSink(name=[`default_catalog`.`default_database`.`sink2`], fields=[total_min])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[total_min])
+- LogicalAggregate(group=[{}], total_min=[MIN($0)])
+- LogicalProject(sum_a=[$1])
+- LogicalAggregate(group=[{0}], sum_a=[SUM($1)])
......@@ -65,13 +65,13 @@ Calc(select=[sum_a], reuse_id=[1])
+- Calc(select=[c, a])
+- BoundedStreamScan(table=[[default_catalog, default_database, MyTable]], fields=[a, b, c])
-Sink(name=[`default_catalog`.`default_database`.`sink1`], fields=[total_sum])
+LegacySink(name=[`default_catalog`.`default_database`.`sink1`], fields=[total_sum])
+- HashAggregate(isMerge=[true], select=[Final_SUM(sum$0) AS total_sum])
+- Exchange(distribution=[single])
+- LocalHashAggregate(select=[Partial_SUM(sum_a) AS sum$0])
+- Reused(reference_id=[1])
-Sink(name=[`default_catalog`.`default_database`.`sink2`], fields=[total_min])
+LegacySink(name=[`default_catalog`.`default_database`.`sink2`], fields=[total_min])
+- HashAggregate(isMerge=[true], select=[Final_MIN(min$0) AS total_min])
+- Exchange(distribution=[single])
+- LocalHashAggregate(select=[Partial_MIN(sum_a) AS min$0])
......
......@@ -22,14 +22,14 @@ limitations under the License.
</Resource>
<Resource name="planBefore">
<![CDATA[
-LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
-Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Sort(orderBy=[b ASC, c ASC])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
......@@ -41,14 +41,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
</Resource>
<Resource name="planBefore">
<![CDATA[
-LogicalSink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
+LogicalLegacySink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
+- Sort(orderBy=[b ASC, c ASC])
+- Exchange(distribution=[hash[b, c]])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
......@@ -61,14 +61,14 @@ Sink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b,
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], EXPR$1=[1:BIGINT], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Sort(orderBy=[c ASC])
+- Calc(select=[a, 1:BIGINT AS EXPR$1, c])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
......@@ -81,14 +81,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], EXPR$1=[1:BIGINT], EXPR$2=[1:BIGINT])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Calc(select=[a, 1:BIGINT AS EXPR$1, 1:BIGINT AS EXPR$2])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
......
......@@ -19,14 +19,14 @@ limitations under the License.
<TestCase name="testAppendSink">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[d, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[d, c])
+- LogicalProject(EXPR$0=[+($0, $1)], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[d, c], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[d, c], changelogMode=[NONE])
+- Calc(select=[+(a, b) AS EXPR$0, c], changelogMode=[I])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable]], fields=[a, b, c], changelogMode=[I])
]]>
......@@ -35,7 +35,7 @@ Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[d, c], ch
<TestCase name="testUpsertAndUpsertSink">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[b, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[b, cnt])
+- LogicalAggregate(group=[{0}], frequency=[COUNT($1)])
+- LogicalProject(cnt=[$1], b=[$0])
+- LogicalFilter(condition=[<($0, 4)])
......@@ -43,7 +43,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[b
+- LogicalProject(b=[$1], a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[b, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[b, cnt])
+- LogicalProject(b=[$0], cnt=[$1])
+- LogicalFilter(condition=[AND(>=($0, 4), <($0, 6))])
+- LogicalAggregate(group=[{0}], cnt=[COUNT($1)])
......@@ -58,13 +58,13 @@ GroupAggregate(groupBy=[b], select=[b, COUNT(a) AS cnt], changelogMode=[I,UB,UA]
+- Calc(select=[b, a], changelogMode=[I])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable]], fields=[a, b, c], changelogMode=[I])
Sink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[b, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink1`], fields=[b, cnt], changelogMode=[NONE])
+- GroupAggregate(groupBy=[cnt], select=[cnt, COUNT_RETRACT(b) AS frequency], changelogMode=[I,UA,D])
+- Exchange(distribution=[hash[cnt]], changelogMode=[I,UB,UA])
+- Calc(select=[b, cnt], where=[<(b, 4)], changelogMode=[I,UB,UA])
+- Reused(reference_id=[1])
Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[b, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[b, cnt], changelogMode=[NONE])
+- Calc(select=[b, cnt], where=[AND(>=(b, 4), <(b, 6))], changelogMode=[I,UB,UA])
+- Reused(reference_id=[1])
]]>
......@@ -73,14 +73,14 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink2`], fields=[b, cnt],
<TestCase name="testAppendUpsertAndRetractSink">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b])
+- LogicalUnion(all=[true])
:- LogicalProject(a=[$0], b=[$1])
: +- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
+- LogicalProject(d=[$0], e=[$1])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable2]])
LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[total_sum])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[total_sum])
+- LogicalAggregate(group=[{}], total_sum=[SUM($0)])
+- LogicalProject(a=[$0])
+- LogicalUnion(all=[true])
......@@ -93,7 +93,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[t
+- LogicalProject(i=[$0], j=[$1])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable3]])
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[total_min])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[total_min])
+- LogicalAggregate(group=[{}], total_min=[MIN($0)])
+- LogicalProject(a=[$0])
+- LogicalUnion(all=[true])
......@@ -115,7 +115,7 @@ Union(all=[true], union=[a, b], changelogMode=[I], reuse_id=[1])
+- Calc(select=[d, e], changelogMode=[I])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable2]], fields=[d, e, f], changelogMode=[I])
Sink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`appendSink`], fields=[a, b], changelogMode=[NONE])
+- Reused(reference_id=[1])
Exchange(distribution=[single], changelogMode=[I], reuse_id=[2])
......@@ -125,11 +125,11 @@ Exchange(distribution=[single], changelogMode=[I], reuse_id=[2])
+- Calc(select=[i, j], changelogMode=[I])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable3]], fields=[i, j, k], changelogMode=[I])
Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[total_sum], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[total_sum], changelogMode=[NONE])
+- GroupAggregate(select=[SUM(a) AS total_sum], changelogMode=[I,UB,UA])
+- Reused(reference_id=[2])
Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[total_min], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[total_min], changelogMode=[NONE])
+- GroupAggregate(select=[MIN(a) AS total_min], changelogMode=[I,UA])
+- Reused(reference_id=[2])
]]>
......@@ -138,14 +138,14 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[total_min
<TestCase name="testRetractAndUpsertSink">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[b, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[b, cnt])
+- LogicalProject(b=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($0, 4)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT($1)])
+- LogicalProject(b=[$1], a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[b, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[b, cnt])
+- LogicalProject(b=[$0], cnt=[$1])
+- LogicalFilter(condition=[AND(>=($0, 4), <($0, 6))])
+- LogicalAggregate(group=[{0}], cnt=[COUNT($1)])
......@@ -160,11 +160,11 @@ GroupAggregate(groupBy=[b], select=[b, COUNT(a) AS cnt], changelogMode=[I,UB,UA]
+- Calc(select=[b, a], changelogMode=[I])
+- DataStreamScan(table=[[default_catalog, default_database, MyTable]], fields=[a, b, c], changelogMode=[I])
Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[b, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[b, cnt], changelogMode=[NONE])
+- Calc(select=[b, cnt], where=[<(b, 4)], changelogMode=[I,UB,UA])
+- Reused(reference_id=[1])
Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[b, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[b, cnt], changelogMode=[NONE])
+- Calc(select=[b, cnt], where=[AND(>=(b, 4), <(b, 6))], changelogMode=[I,UB,UA])
+- Reused(reference_id=[1])
]]>
......@@ -173,7 +173,7 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[b, cnt],
<TestCase name="testRetractSink1">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a, cnt])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
......@@ -181,7 +181,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a, cnt], changelogMode=[NONE])
+- GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], changelogMode=[I,UB,UA])
+- Exchange(distribution=[hash[a]], changelogMode=[I])
+- Calc(select=[a], changelogMode=[I])
......@@ -192,7 +192,7 @@ Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[a, cnt],
<TestCase name="testRetractSink2">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[cnt, a])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[cnt, a])
+- LogicalAggregate(group=[{0}], a=[COUNT($1)])
+- LogicalProject(cnt=[$1], a=[$0])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -202,7 +202,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[c
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[cnt, a], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[cnt, a], changelogMode=[NONE])
+- GroupAggregate(groupBy=[cnt], select=[cnt, COUNT_RETRACT(a) AS a], changelogMode=[I,UB,UA,D])
+- Exchange(distribution=[hash[cnt]], changelogMode=[I,UB,UA])
+- GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], changelogMode=[I,UB,UA])
......@@ -215,7 +215,7 @@ Sink(name=[`default_catalog`.`default_database`.`retractSink`], fields=[cnt, a],
<TestCase name="testUpsertSink1">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
+- LogicalProject(a=[$0])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
......@@ -223,7 +223,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a,
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt], changelogMode=[NONE])
+- GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], changelogMode=[I,UA])
+- Exchange(distribution=[hash[a]], changelogMode=[I])
+- Calc(select=[a], changelogMode=[I])
......@@ -234,7 +234,7 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt],
<TestCase name="testUpsertSink2">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1, b, c1])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1, b, c1])
+- LogicalProject(a1=[$2], b=[$3], c1=[$1])
+- LogicalFilter(condition=[=($2, $0)])
+- LogicalJoin(condition=[true], joinType=[inner])
......@@ -258,7 +258,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1, b, c1], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1, b, c1], changelogMode=[NONE])
+- Calc(select=[a1, b, c1], changelogMode=[I])
+- Join(joinType=[InnerJoin], where=[=(a1, a3)], select=[a3, c1, a1, b, c2], leftInputSpec=[NoUniqueKey], rightInputSpec=[NoUniqueKey], changelogMode=[I])
:- Exchange(distribution=[hash[a3]], changelogMode=[I])
......@@ -279,7 +279,7 @@ Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a1, b, c1
<TestCase name="testUpsertSinkWithFilter">
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt])
+- LogicalProject(a=[$0], cnt=[$1])
+- LogicalFilter(condition=[<($1, 10)])
+- LogicalAggregate(group=[{0}], cnt=[COUNT()])
......@@ -289,7 +289,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a,
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt], changelogMode=[NONE])
LegacySink(name=[`default_catalog`.`default_database`.`upsertSink`], fields=[a, cnt], changelogMode=[NONE])
+- Calc(select=[a, cnt], where=[<(cnt, 10)], changelogMode=[I,UB,UA])
+- GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt], changelogMode=[I,UB,UA])
+- Exchange(distribution=[hash[a]], changelogMode=[I])
......
......@@ -246,7 +246,7 @@ Calc(select=[a, b])
<TestCase name="testMultipleWindowAggregates">
<Resource name="explain">
<![CDATA[== Abstract Syntax Tree ==
LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+- LogicalProject(id1=[$1], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject($f0=[HOP(TUMBLE_ROWTIME($0), 12000:INTERVAL SECOND, 4000:INTERVAL SECOND)], id1=[$1], text=[$2], $f3=[_UTF-16LE'*'])
......@@ -259,7 +259,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a
+- LogicalWatermarkAssigner(rowtime=[rowtime], watermark=[-($1, 0:INTERVAL MILLISECOND)])
+- LogicalTableScan(table=[[default_catalog, default_database, T2]])
LogicalSink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+- LogicalProject(id1=[$1], EXPR$1=[$2])
+- LogicalAggregate(group=[{0, 1}], EXPR$1=[LISTAGG($2, $3)])
+- LogicalProject($f0=[$TUMBLE($1, 9000:INTERVAL SECOND)], id1=[$0], text=[$2], $f3=[_UTF-16LE'-'])
......@@ -270,7 +270,7 @@ LogicalSink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a
+- LogicalWatermarkAssigner(rowtime=[rowtime], watermark=[-($1, 0:INTERVAL MILLISECOND)])
+- LogicalTableScan(table=[[default_catalog, default_database, T2]])
LogicalSink(name=[`default_catalog`.`default_database`.`appendSink3`], fields=[a, b])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`appendSink3`], fields=[a, b])
+- LogicalAggregate(group=[{0}], EXPR$1=[COUNT($1)])
+- LogicalProject(id1=[$1], text=[$2])
+- LogicalAggregate(group=[{0, 1}], text=[LISTAGG($2, $3)])
......@@ -295,20 +295,20 @@ Exchange(distribution=[hash[id1]], reuse_id=[2])
+- Calc(select=[rowtime, id1, text, _UTF-16LE'#' AS $f3])
+- Reused(reference_id=[1])
Sink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
LegacySink(name=[`default_catalog`.`default_database`.`appendSink1`], fields=[a, b])
+- GroupWindowAggregate(groupBy=[id1], window=[SlidingGroupWindow('w$, $f0, 4000, 12000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1])
+- Exchange(distribution=[hash[id1]])
+- Calc(select=[w$rowtime AS $f0, id1, text, _UTF-16LE'*' AS $f3])
+- GroupWindowAggregate(groupBy=[id1], window=[TumblingGroupWindow('w$, rowtime, 6000)], properties=[w$start, w$end, w$rowtime, w$proctime], select=[id1, LISTAGG(text, $f3) AS text, start('w$) AS w$start, end('w$) AS w$end, rowtime('w$) AS w$rowtime, proctime('w$) AS w$proctime])
+- Reused(reference_id=[2])
Sink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
LegacySink(name=[`default_catalog`.`default_database`.`appendSink2`], fields=[a, b])
+- GroupWindowAggregate(groupBy=[id1], window=[TumblingGroupWindow('w$, rowtime, 9000)], select=[id1, LISTAGG(text, $f3) AS EXPR$1])
+- Exchange(distribution=[hash[id1]])
+- Calc(select=[rowtime, id1, text, _UTF-16LE'-' AS $f3])
+- Reused(reference_id=[1])
Sink(name=[`default_catalog`.`default_database`.`appendSink3`], fields=[a, b])
LegacySink(name=[`default_catalog`.`default_database`.`appendSink3`], fields=[a, b])
+- GlobalGroupAggregate(groupBy=[id1], select=[id1, COUNT(count$0) AS EXPR$1])
+- Exchange(distribution=[hash[id1]])
+- LocalGroupAggregate(groupBy=[id1], select=[id1, COUNT(text) AS count$0])
......
......@@ -22,14 +22,14 @@ limitations under the License.
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
</Resource>
......@@ -40,14 +40,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
+- LogicalProject(a=[$0], b=[$1], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b, c])
+- Exchange(distribution=[hash[b, c]])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
......@@ -59,14 +59,14 @@ Sink(name=[`default_catalog`.`default_database`.`sinkShuffleBy`], fields=[a, b,
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], EXPR$1=[1:BIGINT], c=[$2])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Calc(select=[a, 1:BIGINT AS EXPR$1, c])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
......@@ -78,14 +78,14 @@ Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
</Resource>
<Resource name="planBefore">
<![CDATA[
LogicalSink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LogicalLegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- LogicalProject(a=[$0], EXPR$1=[1:BIGINT], EXPR$2=[1:BIGINT])
+- LogicalTableScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]])
]]>
</Resource>
<Resource name="planAfter">
<![CDATA[
Sink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
LegacySink(name=[`default_catalog`.`default_database`.`sink`], fields=[a, b, c])
+- Calc(select=[a, 1:BIGINT AS EXPR$1, 1:BIGINT AS EXPR$2])
+- LegacyTableSourceScan(table=[[default_catalog, default_database, MyTable, source: [TestTableSource(a, b, c)]]], fields=[a, b, c])
]]>
......
......@@ -26,7 +26,7 @@ import org.apache.flink.table.types.logical.{BigIntType, IntType}
import org.junit.Test
class SinkTest extends TableTestBase {
class LegacySinkTest extends TableTestBase {
val LONG = new BigIntType()
val INT = new IntType()
......
......@@ -23,7 +23,7 @@ import org.apache.flink.table.api.{DataTypes, TableSchema, ValidationException}
import org.apache.flink.table.catalog.{CatalogViewImpl, ObjectPath}
import org.apache.flink.table.planner.JHashMap
import org.apache.flink.table.planner.plan.hint.OptionsHintTest.{IS_BOUNDED, Param}
import org.apache.flink.table.planner.plan.nodes.calcite.LogicalSink
import org.apache.flink.table.planner.plan.nodes.calcite.LogicalLegacySink
import org.apache.flink.table.planner.utils.{OptionsTableSink, TableTestBase, TableTestUtil, TestingStatementSet, TestingTableEnvironment}
import org.hamcrest.Matchers._
......@@ -98,8 +98,8 @@ class OptionsHintTest(param: Param)
val testStmtSet = stmtSet.asInstanceOf[TestingStatementSet]
val relNodes = testStmtSet.getOperations.map(util.getPlanner.translateToRel)
assertThat(relNodes.length, is(1))
assert(relNodes.head.isInstanceOf[LogicalSink])
val sink = relNodes.head.asInstanceOf[LogicalSink]
assert(relNodes.head.isInstanceOf[LogicalLegacySink])
val sink = relNodes.head.asInstanceOf[LogicalLegacySink]
assertEquals("{k1=#v1, k2=v2, k5=v5}",
sink.sink.asInstanceOf[OptionsTableSink].props.toString)
}
......
......@@ -22,7 +22,7 @@ import org.apache.flink.api.scala._
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.{FlinkLogicalAggregate, FlinkLogicalCalc, FlinkLogicalExpand, FlinkLogicalJoin, FlinkLogicalSink, FlinkLogicalLegacyTableSourceScan, FlinkLogicalValues}
import org.apache.flink.table.planner.plan.nodes.logical.{FlinkLogicalAggregate, FlinkLogicalCalc, FlinkLogicalExpand, FlinkLogicalJoin, FlinkLogicalLegacySink, FlinkLogicalLegacyTableSourceScan, FlinkLogicalValues}
import org.apache.flink.table.planner.plan.optimize.program._
import org.apache.flink.table.planner.plan.rules.FlinkBatchRuleSets
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
......@@ -76,7 +76,7 @@ class FlinkAggregateRemoveRuleTest extends TableTestBase {
FlinkLogicalValues.CONVERTER,
FlinkLogicalExpand.CONVERTER,
FlinkLogicalLegacyTableSourceScan.CONVERTER,
FlinkLogicalSink.CONVERTER))
FlinkLogicalLegacySink.CONVERTER))
.setRequiredOutputTraits(Array(FlinkConventions.LOGICAL))
.build())
util.replaceBatchProgram(programs)
......
......@@ -26,7 +26,7 @@ import org.apache.flink.table.types.logical.{BigIntType, IntType, VarCharType}
import org.junit.Test
class SinkTest extends TableTestBase {
class LegacySinkTest extends TableTestBase {
private val util = streamTestUtil()
util.addDataStream[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
......
......@@ -29,7 +29,7 @@ import org.apache.flink.types.Row
import org.junit.Test
class TableSinkValidationTest extends TableTestBase {
class LegacyTableSinkValidationTest extends TableTestBase {
@Test(expected = classOf[ValidationException])
def testAppendSinkOnUpdatingTable(): Unit = {
......
......@@ -32,7 +32,7 @@ import java.util.TimeZone
import scala.collection.JavaConverters._
class TableSinkITCase extends BatchTestBase {
class LegacyTableSinkITCase extends BatchTestBase {
@Test
def testDecimalOutputFormatTableSink(): Unit = {
......
......@@ -39,7 +39,7 @@ import java.util.TimeZone
import scala.collection.JavaConverters._
class TableSinkITCase extends AbstractTestBase {
class LegacyTableSinkITCase extends AbstractTestBase {
@Test
def testStreamTableSink(): Unit = {
......