Commit 0385651f authored by Aljoscha Krettek

[scala] Add Scalastyle, use scalastyle-config.xml from Spark

Parent fd280981
.cache
scalastyle-output.xml
.classpath
.idea
.metadata
......
......@@ -71,7 +71,8 @@ object ConnectedComponents {
// assign the initial components (equal to the vertex id)
val vertices = getVerticesDataSet(env).map { id => (id, id) }
// undirected edges by emitting for each input edge the input edges itself and an inverted version
// undirected edges by emitting for each input edge the input edges itself and an inverted
// version
val edges = getEdgesDataSet(env).flatMap { edge => Seq(edge, (edge._2, edge._1)) }
// open a delta iteration
......@@ -113,13 +114,15 @@ object ConnectedComponents {
outputPath = args(2)
maxIterations = args(3).toInt
} else {
System.err.println("Usage: ConnectedComponents <vertices path> <edges path> <result path> <max number of iterations>")
System.err.println("Usage: ConnectedComponents <vertices path> <edges path> <result path>" +
" <max number of iterations>")
false
}
} else {
System.out.println("Executing Connected Components example with built-in default data.")
System.out.println(" Provide parameters to read input data from a file.")
System.out.println(" Usage: ConnectedComponents <vertices path> <edges path> <result path> <max number of iterations>")
System.out.println(" Usage: ConnectedComponents <vertices path> <edges path> <result path>" +
" <max number of iterations>")
}
true
}
......
......@@ -110,9 +110,9 @@ object EnumTrianglesBasic {
// *************************************************************************
/**
* Builds triads (triples of vertices) from pairs of edges that share a vertex.
* The first vertex of a triad is the shared vertex, the second and third vertex are ordered by vertexId.
* Assumes that input edges share the first vertex and are in ascending order of the second vertex.
* Builds triads (triples of vertices) from pairs of edges that share a vertex. The first vertex
* of a triad is the shared vertex, the second and third vertex are ordered by vertexId. Assumes
* that input edges share the first vertex and are in ascending order of the second vertex.
*/
class TriadBuilder extends GroupReduceFunction[Edge, Triad] {
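A minimal plain-Scala sketch of the pairing logic this comment describes (the Edge and Triad shapes below are hypothetical stand-ins, not the classes from EnumTrianglesBasic, and the code does not implement the Flink GroupReduceFunction interface):
case class Edge(v1: Int, v2: Int)            // hypothetical stand-in
case class Triad(v1: Int, v2: Int, v3: Int)  // hypothetical stand-in
// For edges that all share the first vertex and arrive sorted by the second vertex,
// pair every new edge with each previously seen one; the shared vertex comes first,
// the other two vertices are emitted in ascending vertexId order.
def buildTriads(edges: Seq[Edge]): Seq[Triad] = {
  val seen = scala.collection.mutable.ArrayBuffer[Int]()
  val triads = scala.collection.mutable.ArrayBuffer[Triad]()
  for (e <- edges) {
    for (lower <- seen) triads += Triad(e.v1, lower, e.v2)
    seen += e.v2
  }
  triads.toSeq
}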
......@@ -162,7 +162,9 @@ object EnumTrianglesBasic {
if (fileOutput) {
env.readCsvFile[Edge](edgePath, fieldDelimiter = ' ', includedFields = Array(0, 1))
} else {
val edges = EnumTrianglesData.EDGES.map{ case Array(v1, v2) => new Edge(v1.asInstanceOf[Int], v2.asInstanceOf[Int]) }
val edges = EnumTrianglesData.EDGES.map {
case Array(v1, v2) => new Edge(v1.asInstanceOf[Int], v2.asInstanceOf[Int])
}
env.fromCollection(edges)
}
}
......
......@@ -86,7 +86,9 @@ object PageRankBasic {
// initialize lists
.map(e => AdjacencyList(e.sourceId, Array(e.targetId)))
// concatenate lists
.groupBy("sourceId").reduce((l1, l2) => AdjacencyList(l1.sourceId, l1.targetIds ++ l2.targetIds))
.groupBy("sourceId").reduce {
(l1, l2) => AdjacencyList(l1.sourceId, l1.targetIds ++ l2.targetIds)
}
// start iteration
val finalRanks = pagesWithRanks.iterateWithTermination(maxIterations) {
......
......@@ -49,17 +49,17 @@ object TransitiveClosureNaive {
(prev, next, out: Collector[(Long, Long)]) => {
val prevPaths = prev.toSet
for (n <- next)
if (!prevPaths.contains(n))
out.collect(n)
if (!prevPaths.contains(n)) out.collect(n)
}
}
(nextPaths, terminate)
}
if (fileOutput)
if (fileOutput) {
paths.writeAsCsv(outputPath, "\n", " ")
else
} else {
paths.print()
}
env.execute("Scala Transitive Closure Example")
......@@ -81,15 +81,18 @@ object TransitiveClosureNaive {
maxIterations = Integer.parseInt(programArguments(2))
}
else {
System.err.println("Usage: TransitiveClosure <edges path> <result path> <max number of iterations>")
System.err.println("Usage: TransitiveClosure <edges path> <result path> <max number of " +
"iterations>")
return false
}
}
else {
System.out.println("Executing TransitiveClosure example with default parameters and built-in default data.")
System.out.println("Executing TransitiveClosure example with default parameters and " +
"built-in default data.")
System.out.println(" Provide parameters to read input data from files.")
System.out.println(" See the documentation for the correct format of input files.")
System.out.println(" Usage: TransitiveClosure <edges path> <result path> <max number of iterations>")
System.out.println(" Usage: TransitiveClosure <edges path> <result path> <max number of " +
"iterations>")
}
true
}
......
......@@ -37,7 +37,7 @@ object PiEstimation {
val y = Math.random()
if (x * x + y * y < 1) 1L else 0L
}
.reduce(_+_)
.reduce(_ + _)
// ratio of samples in upper right quadrant vs total samples gives surface of upper
// right quadrant, times 4 gives surface of whole unit circle, i.e. PI
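The reasoning in that comment, as a self-contained plain-Scala sketch (no Flink involved): the fraction of random points in the unit square that fall inside the quarter circle approximates pi/4, so multiplying by 4 recovers pi.
// Monte Carlo estimate of pi: count points of the unit square landing inside the quarter circle.
def estimatePi(numSamples: Int): Double = {
  val hits = (1 to numSamples).count { _ =>
    val x = Math.random()
    val y = Math.random()
    x * x + y * y < 1
  }
  4.0 * hits / numSamples
}
// estimatePi(1000000) typically yields a value close to 3.14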
......
......@@ -104,7 +104,7 @@ object LinearRegression {
Params(theta0 / a, theta1 / a)
}
def +(other: Params) = {
def + (other: Params) = {
Params(theta0 + other.theta0, theta1 + other.theta1)
}
}
......
......@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala
import org.apache.commons.lang3.Validate
......@@ -825,6 +824,28 @@ class DataSet[T: ClassTag](private[flink] val set: JavaDataSet[T]) {
wrap(result)
}
/**
* Creates a new DataSet by performing delta (or workset) iterations using the given step
* function. At the beginning `this` DataSet is the solution set and `workset` is the Workset.
* The iteration step function gets the current solution set and workset and must output the
* delta for the solution set and the workset for the next iteration.
*
* Note: The syntax of delta iterations is very likely going to change soon.
*/
def iterateDelta[R: ClassTag](workset: DataSet[R], maxIterations: Int, keyFields: Array[String])(
stepFunction: (DataSet[T], DataSet[R]) => (DataSet[T], DataSet[R])) = {
val fieldIndices = fieldNames2Indices(set.getType, keyFields)
val key = new FieldPositionKeys[T](fieldIndices, set.getType, false)
val iterativeSet = new DeltaIteration[T, R](
set.getExecutionEnvironment, set.getType, set, workset.set, key, maxIterations)
val (newSolution, newWorkset) = stepFunction(
wrap(iterativeSet.getSolutionSet),
wrap(iterativeSet.getWorkset))
val result = iterativeSet.closeWith(newSolution.set, newWorkset.set)
wrap(result)
}
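A hypothetical usage sketch of the new iterateDelta method, assuming `vertices` and `initialWorkset` are DataSet[(Long, Long)] values keyed on the tuple's first field (the "_1" field name and the trivial step function are illustrative only, not taken from the commit):
// Run up to 100 delta iterations; the step function returns (solution set delta, next workset).
val result = vertices.iterateDelta(initialWorkset, 100, Array("_1")) {
  (solution, workset) =>
    // Forwarding the workset as both the delta and the next workset is enough to show the plumbing.
    (workset, workset)
}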
// -------------------------------------------------------------------------------------------
// Custom Operators
// -------------------------------------------------------------------------------------------
......
......@@ -22,7 +22,7 @@ import java.util.UUID
import org.apache.commons.lang3.Validate
import org.apache.flink.api.common.JobExecutionResult
import org.apache.flink.api.java.io._
import org.apache.flink.api.java.typeutils.{TupleTypeInfoBase, BasicTypeInfo}
import org.apache.flink.api.java.typeutils.{ValueTypeInfo, TupleTypeInfoBase, BasicTypeInfo}
import org.apache.flink.api.scala.operators.ScalaCsvInputFormat
import org.apache.flink.core.fs.Path
......@@ -30,7 +30,7 @@ import org.apache.flink.api.java.{ExecutionEnvironment => JavaEnv}
import org.apache.flink.api.common.io.{InputFormat, FileInputFormat}
import org.apache.flink.api.java.operators.DataSource
import org.apache.flink.types.TypeInformation
import org.apache.flink.types.{StringValue, TypeInformation}
import org.apache.flink.util.{NumberSequenceIterator, SplittableIterator}
import scala.collection.JavaConverters._
......@@ -104,6 +104,27 @@ class ExecutionEnvironment(javaEnv: JavaEnv) {
wrap(source)
}
/**
* Creates a DataSet of Strings produced by reading the given file line wise.
* This method is similar to [[readTextFile]], but it produces a DataSet with mutable
* [[StringValue]] objects, rather than Java Strings. StringValues can be used to tune
* implementations to be less object and garbage collection heavy.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName The name of the character set used to read the file. Default is UTF-8.
*/
def readTextFileWithValue(
filePath: String,
charsetName: String = "UTF-8"): DataSet[StringValue] = {
Validate.notNull(filePath, "The file path may not be null.")
val format = new TextValueInputFormat(new Path(filePath))
format.setCharsetName(charsetName)
val source = new DataSource[StringValue](
javaEnv, format, new ValueTypeInfo[StringValue](classOf[StringValue]))
wrap(source)
}
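A hypothetical usage sketch for readTextFileWithValue (the input path is a placeholder):
val env = ExecutionEnvironment.getExecutionEnvironment
// Read lines as mutable StringValue records and keep only the non-empty ones.
val lines = env.readTextFileWithValue("file:///tmp/input.txt")
val nonEmpty = lines.filter { v => v.getValue.nonEmpty }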
/**
* Creates a DataSet by reading the given CSV file. The type parameter must be used to specify
* a Tuple type that has the same number of fields as there are fields in the CSV file. If the
......@@ -337,9 +358,10 @@ class ExecutionEnvironment(javaEnv: JavaEnv) {
def createProgramPlan(jobName: String = "") = {
if (jobName.isEmpty) {
javaEnv.createProgramPlan()
} else
} else {
javaEnv.createProgramPlan(jobName)
}
}
}
object ExecutionEnvironment {
......@@ -360,7 +382,8 @@ object ExecutionEnvironment {
* of parallelism of the local environment is the number of hardware contexts (CPU cores/threads).
*/
def createLocalEnvironment(
degreeOfParallelism: Int = Runtime.getRuntime.availableProcessors()) : ExecutionEnvironment = {
degreeOfParallelism: Int = Runtime.getRuntime.availableProcessors())
: ExecutionEnvironment = {
val javaEnv = JavaEnv.createLocalEnvironment()
javaEnv.setDegreeOfParallelism(degreeOfParallelism)
new ExecutionEnvironment(javaEnv)
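For example (a hypothetical call, not part of the diff):
// Local environment with an explicit degree of parallelism instead of the CPU-core default.
val env = ExecutionEnvironment.createLocalEnvironment(degreeOfParallelism = 4)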
......
......@@ -33,8 +33,8 @@ import scala.reflect.ClassTag
/**
* A specific [[DataSet]] that results from a `coGroup` operation. The result of a default coGroup is
* a tuple containing two arrays of values from the two sides of the coGroup. The result of the
* A specific [[DataSet]] that results from a `coGroup` operation. The result of a default coGroup
* is a tuple containing two arrays of values from the two sides of the coGroup. The result of the
* coGroup can be changed by specifying a custom coGroup function using the `apply` method or by
* providing a [[RichCoGroupFunction]].
*
......
......@@ -15,8 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.codegen
private[flink] class Counter {
......
......@@ -15,8 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.codegen
import scala.language.implicitConversions
......@@ -50,11 +48,12 @@ private[flink] trait TreeGen[C <: Context] { this: MacroContextHolder[C] with Ty
reify(c.Expr(source).splice.asInstanceOf[T]).tree
def maybeMkAsInstanceOf[S: c.WeakTypeTag, T: c.WeakTypeTag](source: Tree): Tree = {
if (weakTypeOf[S] <:< weakTypeOf[T])
if (weakTypeOf[S] <:< weakTypeOf[T]) {
source
else
} else {
mkAsInstanceOf[T](source)
}
}
// def mkIdent(target: Symbol): Tree = Ident(target) setType target.tpe
def mkSelect(rootModule: String, path: String*): Tree =
......
......@@ -15,8 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.codegen
import scala.Option.option2Iterable
......@@ -107,11 +105,12 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C]
appliedType(d.asType.toType, dArgs)
}
if (dTpe <:< tpe)
if (dTpe <:< tpe) {
Some(analyze(dTpe))
else
} else {
None
}
}
val errors = subTypes flatMap { _.findByType[UnsupportedDescriptor] }
......@@ -150,7 +149,11 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C]
case true =>
Some(
FieldAccessor(
bGetter, bSetter, bTpe, isBaseField = true, analyze(bTpe.termSymbol.asMethod.returnType)))
bGetter,
bSetter,
bTpe,
isBaseField = true,
analyze(bTpe.termSymbol.asMethod.returnType)))
case false => None
}
}
......@@ -167,7 +170,9 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C]
desc match {
case desc @ BaseClassDescriptor(_, _, getters, baseSubTypes) =>
desc.copy(getters = getters map updateField, subTypes = baseSubTypes map wireBaseFields)
desc.copy(
getters = getters map updateField,
subTypes = baseSubTypes map wireBaseFields)
case desc @ CaseClassDescriptor(_, _, _, _, getters) =>
desc.copy(getters = getters map updateField)
case _ => desc
......@@ -221,7 +226,7 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C]
case errs @ _ :: _ =>
val msgs = errs flatMap { f =>
(f: @unchecked) match {
case FieldAccessor(fgetter, _, _, _, UnsupportedDescriptor(_, fTpe, errors)) =>
case FieldAccessor(fgetter, _,_,_, UnsupportedDescriptor(_, fTpe, errors)) =>
errors map { err => "Field " + fgetter.name + ": " + fTpe + " - " + err }
}
}
......
......@@ -15,8 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.codegen
import scala.language.postfixOps
......@@ -122,7 +120,8 @@ private[flink] trait TypeDescriptors[C <: Context] { this: MacroContextHolder[C]
id: Int, tpe: Type, override val getters: Seq[FieldAccessor], subTypes: Seq[UDTDescriptor])
extends UDTDescriptor {
override def flatten = this +: ((getters flatMap { _.desc.flatten }) ++ (subTypes flatMap { _.flatten }))
override def flatten =
this +: ((getters flatMap { _.desc.flatten }) ++ (subTypes flatMap { _.flatten }))
override def canBeKey = flatten forall { f => f.canBeKey }
override def select(path: List[String]): Seq[Option[UDTDescriptor]] = path match {
......@@ -151,7 +150,8 @@ private[flink] trait TypeDescriptors[C <: Context] { this: MacroContextHolder[C]
override def hashCode = (id, tpe, ctor, getters).hashCode
override def equals(that: Any) = that match {
case CaseClassDescriptor(thatId, thatTpe, thatMutable, thatCtor, thatGetters) =>
(id, tpe, mutable, ctor, getters).equals(thatId, thatTpe, thatMutable, thatCtor, thatGetters)
(id, tpe, mutable, ctor, getters).equals(
thatId, thatTpe, thatMutable, thatCtor, thatGetters)
case _ => false
}
......@@ -164,7 +164,12 @@ private[flink] trait TypeDescriptors[C <: Context] { this: MacroContextHolder[C]
}
}
case class FieldAccessor(getter: Symbol, setter: Symbol, tpe: Type, isBaseField: Boolean, desc: UDTDescriptor)
case class FieldAccessor(
getter: Symbol,
setter: Symbol,
tpe: Type,
isBaseField: Boolean,
desc: UDTDescriptor)
case class RecursiveDescriptor(id: Int, tpe: Type, refId: Int) extends UDTDescriptor {
override def flatten = Seq(this)
......
......@@ -15,8 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.codegen
import org.apache.flink.api.common.typeutils.TypeSerializer
......@@ -116,14 +114,16 @@ private[flink] trait TypeInformationGen[C <: Context] {
}
}
def mkValueTypeInfo[T <: Value : c.WeakTypeTag](desc: UDTDescriptor): c.Expr[TypeInformation[T]] = {
def mkValueTypeInfo[T <: Value : c.WeakTypeTag](
desc: UDTDescriptor): c.Expr[TypeInformation[T]] = {
val tpeClazz = c.Expr[Class[T]](Literal(Constant(desc.tpe)))
reify {
new ValueTypeInfo[T](tpeClazz.splice)
}
}
def mkWritableTypeInfo[T <: Writable : c.WeakTypeTag](desc: UDTDescriptor): c.Expr[TypeInformation[T]] = {
def mkWritableTypeInfo[T <: Writable : c.WeakTypeTag](
desc: UDTDescriptor): c.Expr[TypeInformation[T]] = {
val tpeClazz = c.Expr[Class[T]](Literal(Constant(desc.tpe)))
reify {
new WritableTypeInfo[T](tpeClazz.splice)
......@@ -153,7 +153,8 @@ private[flink] trait TypeInformationGen[C <: Context] {
c.Expr[T](result)
}
// def mkCaseClassTypeInfo[T: c.WeakTypeTag](desc: CaseClassDescriptor): c.Expr[TypeInformation[T]] = {
// def mkCaseClassTypeInfo[T: c.WeakTypeTag](
// desc: CaseClassDescriptor): c.Expr[TypeInformation[T]] = {
// val tpeClazz = c.Expr[Class[_]](Literal(Constant(desc.tpe)))
// val caseFields = mkCaseFields(desc)
// reify {
......@@ -178,10 +179,12 @@ private[flink] trait TypeInformationGen[C <: Context] {
// c.Expr(mkMap(fields))
// }
//
// protected def getFields(name: String, desc: UDTDescriptor): Seq[(String, UDTDescriptor)] = desc match {
// protected def getFields(name: String, desc: UDTDescriptor): Seq[(String, UDTDescriptor)] =
// desc match {
// // Flatten product types
// case CaseClassDescriptor(_, _, _, _, getters) =>
// getters filterNot { _.isBaseField } flatMap { f => getFields(name + "." + f.getter.name, f.desc) }
// getters filterNot { _.isBaseField } flatMap {
// f => getFields(name + "." + f.getter.name, f.desc) }
// case _ => Seq((name, desc))
// }
}
......@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.typeutils
import org.apache.flink.api.common.typeutils.{TypeComparator, TypeSerializer}
......
......@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.typeutils
import org.apache.flink.api.common.typeutils.TypeSerializer
......
......@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.typeutils
import org.apache.flink.api.java.typeutils.{AtomicType, TupleTypeInfoBase}
......@@ -79,8 +78,8 @@ abstract class ScalaTupleTypeInfo[T <: Product](
def getFieldIndices(fields: Array[String]): Array[Int] = {
val result = fields map { x => fieldNames.indexOf(x) }
if (result.contains(-1)) {
throw new IllegalArgumentException("Fields '" + fields.mkString(", ") + "' are not valid for" +
" " + tupleClass + " with fields '" + fieldNames.mkString(", ") + "'.")
throw new IllegalArgumentException("Fields '" + fields.mkString(", ") +
"' are not valid for " + tupleClass + " with fields '" + fieldNames.mkString(", ") + "'.")
}
result
}
......
......@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.typeutils
import scala.reflect.macros.Context
......
......@@ -71,7 +71,9 @@ private[flink] abstract class UnfinishedKeyPairOperation[T, O, R](
* This only works on a CaseClass [[DataSet]].
*/
def where(firstLeftField: String, otherLeftFields: String*) = {
val fieldIndices = fieldNames2Indices(leftSet.set.getType, firstLeftField +: otherLeftFields.toArray)
val fieldIndices = fieldNames2Indices(
leftSet.set.getType,
firstLeftField +: otherLeftFields.toArray)
val leftKey = new FieldPositionKeys[T](fieldIndices, leftSet.set.getType)
new HalfUnfinishedKeyPairOperation[T, O, R](this, leftKey)
......
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
......@@ -14,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala
import java.lang.reflect.Method
......@@ -149,7 +149,8 @@ class ScalaAPICompletenessTest {
checkMethods("SingleInputOperator", "DataSet",
classOf[SingleInputOperator[_, _, _]], classOf[DataSet[_]])
checkMethods("TwoInputOperator", "DataSet", classOf[TwoInputOperator[_, _, _, _]], classOf[DataSet[_]])
checkMethods("TwoInputOperator", "DataSet",
classOf[TwoInputOperator[_, _, _, _]], classOf[DataSet[_]])
checkMethods("SingleInputUdfOperator", "DataSet",
classOf[SingleInputUdfOperator[_, _, _]], classOf[DataSet[_]])
......
......@@ -46,7 +46,7 @@ class SemanticPropertiesTranslationTest {
try {
val env = ExecutionEnvironment.getExecutionEnvironment
val input = env.fromElements((3l, "test", 42))
val input = env.fromElements((3L, "test", 42))
input.map(new WildcardConstantMapper[(Long, String, Int)]).print()
val plan = env.createProgramPlan()
......@@ -83,7 +83,7 @@ class SemanticPropertiesTranslationTest {
try {
val env = ExecutionEnvironment.getExecutionEnvironment
val input = env.fromElements((3l, "test", 42))
val input = env.fromElements((3L, "test", 42))
input.map(new IndividualConstantMapper[Long, String, Int]).print()
val plan = env.createProgramPlan()
......@@ -120,8 +120,8 @@ class SemanticPropertiesTranslationTest {
try {
val env = ExecutionEnvironment.getExecutionEnvironment
val input1 = env.fromElements((3l, "test"))
val input2 = env.fromElements((3l, 3.1415))
val input1 = env.fromElements((3L, "test"))
val input2 = env.fromElements((3L, 3.1415))
input1.join(input2).where(0).equalTo(0)(
new ForwardingTupleJoin[Long, String, Long, Double]).print()
......
......@@ -100,7 +100,9 @@ class DeltaIterationTranslationTest {
assertEquals(classOf[IdentityMapper[_]], worksetMapper.getUserCodeWrapper.getUserCodeClass)
assertEquals(classOf[NextWorksetMapper], nextWorksetMapper.getUserCodeWrapper.getUserCodeClass)
assertEquals(
classOf[NextWorksetMapper],
nextWorksetMapper.getUserCodeWrapper.getUserCodeClass)
if (solutionSetJoin.getUserCodeWrapper.getUserCodeObject.isInstanceOf[WrappingFunction[_]]) {
......@@ -203,7 +205,8 @@ class DeltaIterationTranslationTest {
// val iteration: DeltaIteration[Tuple3[Double, Long, String], Tuple2[Double,
// String]] = initialSolutionSet.iterateDelta(initialWorkSet, 10, 1)
// try {
// iteration.getWorkset.coGroup(iteration.getSolutionSet).where(1).equalTo(2).`with`(new DeltaIterationTranslationTest.SolutionWorksetCoGroup1)
// iteration.getWorkset.coGroup(iteration.getSolutionSet).where(1).equalTo(2).`with`(
// new DeltaIterationTranslationTest.SolutionWorksetCoGroup1)
// fail("Accepted invalid program.")
// }
// catch {
......@@ -211,7 +214,8 @@ class DeltaIterationTranslationTest {
// }
// }
// try {
// iteration.getSolutionSet.coGroup(iteration.getWorkset).where(2).equalTo(1).`with`(new DeltaIterationTranslationTest.SolutionWorksetCoGroup2)
// iteration.getSolutionSet.coGroup(iteration.getWorkset).where(2).equalTo(1).`with`(
// new DeltaIterationTranslationTest.SolutionWorksetCoGroup2)
// fail("Accepted invalid program.")
// }
// catch {
......
......@@ -121,7 +121,9 @@ class ReduceTranslationTest {
assertEquals(keyValueInfo, reducer.getOperatorInfo.getOutputType)
assertEquals(keyValueInfo, keyProjector.getOperatorInfo.getInputType)
assertEquals(initialData.set.getType, keyProjector.getOperatorInfo.getOutputType)
assertEquals(classOf[KeyExtractingMapper[_, _]], keyExtractor.getUserCodeWrapper.getUserCodeClass)
assertEquals(
classOf[KeyExtractingMapper[_, _]],
keyExtractor.getUserCodeWrapper.getUserCodeClass)
assertTrue(keyExtractor.getInput.isInstanceOf[GenericDataSourceBase[_, _]])
}
catch {
......
......@@ -34,8 +34,8 @@ class TupleSerializerTest {
@Test
def testTuple1Int(): Unit = {
val testTuples =
Array(Tuple1(42), Tuple1(1), Tuple1(0), Tuple1(-1), Tuple1(Int.MaxValue), Tuple1(Int.MinValue))
val testTuples = Array(Tuple1(42), Tuple1(1), Tuple1(0), Tuple1(-1), Tuple1(Int.MaxValue),
Tuple1(Int.MinValue))
runTests(testTuples)
}
......
......@@ -522,8 +522,10 @@ under the License.
<exclude>**/*.creole</exclude>
<exclude>CONTRIBUTORS</exclude>
<exclude>DEPENDENCIES</exclude>
<!-- Build fiels -->
<!-- Build files -->
<exclude>tools/maven/checkstyle.xml</exclude>
<exclude>tools/maven/scalastyle-config.xml</exclude>
<exclude>**/scalastyle-output.xml</exclude>
<exclude>tools/maven/suppressions.xml</exclude>
<exclude>**/pom.xml</exclude>
<exclude>**/pom.hadoop2.xml</exclude>
......@@ -555,6 +557,29 @@ under the License.
<configLocation>/tools/maven/checkstyle.xml</configLocation>
<logViolationsToConsole>true</logViolationsToConsole>
</configuration>
</plugin>
<plugin>
<groupId>org.scalastyle</groupId>
<artifactId>scalastyle-maven-plugin</artifactId>
<version>0.5.0</version>
<configuration>
<verbose>false</verbose>
<failOnViolation>true</failOnViolation>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<failOnWarning>false</failOnWarning>
<sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
<testSourceDirectory>${basedir}/src/test/scala</testSourceDirectory>
<configLocation>tools/maven/scalastyle-config.xml</configLocation>
<outputFile>${project.basedir}/scalastyle-output.xml</outputFile>
<outputEncoding>UTF-8</outputEncoding>
</configuration>
<executions>
<execution>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<!-- just define the Java version to be used for compiling and plugins -->
......
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- NOTE: This was taken and adapted from Apache Spark. -->
<!-- If you wish to turn off checking for a section of code, you can put a comment in the source
before and after the section, with the following syntax: -->
<!-- // scalastyle:off -->
<!-- ... -->
<!-- // naughty stuff -->
<!-- ... -->
<!-- // scalastyle:on -->
<scalastyle>
<name>Scalastyle standard configuration</name>
<check level="error" class="org.scalastyle.file.FileTabChecker" enabled="true"></check>
<!-- <check level="error" class="org.scalastyle.file.FileLengthChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="maxFileLength"><![CDATA[800]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<check level="error" class="org.scalastyle.file.HeaderMatchesChecker" enabled="true">
<parameters>
<parameter name="header"><![CDATA[/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/]]></parameter>
</parameters>
</check>
<check level="error" class="org.scalastyle.scalariform.SpacesAfterPlusChecker" enabled="true"></check>
<check level="error" class="org.scalastyle.file.WhitespaceEndOfLineChecker" enabled="false"></check>
<check level="error" class="org.scalastyle.scalariform.SpacesBeforePlusChecker" enabled="true"></check>
<check level="error" class="org.scalastyle.file.FileLineLengthChecker" enabled="true">
<parameters>
<parameter name="maxLineLength"><![CDATA[100]]></parameter>
<parameter name="tabSize"><![CDATA[2]]></parameter>
<parameter name="ignoreImports">true</parameter>
</parameters>
</check>
<check level="error" class="org.scalastyle.scalariform.ClassNamesChecker" enabled="true">
<parameters>
<parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter>
</parameters>
</check>
<check level="error" class="org.scalastyle.scalariform.ObjectNamesChecker" enabled="true">
<parameters>
<parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter>
</parameters>
</check>
<check level="error" class="org.scalastyle.scalariform.PackageObjectNamesChecker" enabled="true">
<parameters>
<parameter name="regex"><![CDATA[^[a-z][A-Za-z]*$]]></parameter>
</parameters>
</check>
<check level="error" class="org.scalastyle.scalariform.EqualsHashCodeChecker" enabled="false"></check>
<!-- <check level="error" class="org.scalastyle.scalariform.IllegalImportsChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="illegalImports"><![CDATA[sun._,java.awt._]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<check level="error" class="org.scalastyle.scalariform.ParameterNumberChecker" enabled="true">
<parameters>
<parameter name="maxParameters"><![CDATA[10]]></parameter>
</parameters>
</check>
<!-- <check level="error" class="org.scalastyle.scalariform.MagicNumberChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="ignore"><![CDATA[-1,0,1,2,3]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<check level="error" class="org.scalastyle.scalariform.NoWhitespaceBeforeLeftBracketChecker" enabled="false"></check>
<check level="error" class="org.scalastyle.scalariform.NoWhitespaceAfterLeftBracketChecker" enabled="false"></check>
<!-- <check level="error" class="org.scalastyle.scalariform.ReturnChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.NullChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.NoCloneChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.NoFinalizeChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.CovariantEqualsChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.StructuralTypeChecker" enabled="true"></check> -->
<!-- <check level="error" class="org.scalastyle.file.RegexChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="regex"><![CDATA[println]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.NumberOfTypesChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="maxTypes"><![CDATA[30]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.CyclomaticComplexityChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="maximum"><![CDATA[10]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<check level="error" class="org.scalastyle.scalariform.UppercaseLChecker" enabled="true"></check>
<check level="error" class="org.scalastyle.scalariform.SimplifyBooleanExpressionChecker" enabled="false"></check>
<check level="error" class="org.scalastyle.scalariform.IfBraceChecker" enabled="true">
<parameters>
<parameter name="singleLineAllowed"><![CDATA[true]]></parameter>
<parameter name="doubleLineAllowed"><![CDATA[true]]></parameter>
</parameters>
</check>
<!-- <check level="error" class="org.scalastyle.scalariform.MethodLengthChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="maxLength"><![CDATA[50]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.MethodNamesChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="regex"><![CDATA[^[a-z][A-Za-z0-9]*$]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.NumberOfMethodsInTypeChecker" enabled="true"> -->
<!-- <parameters> -->
<!-- <parameter name="maxMethods"><![CDATA[30]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
<!-- <check level="error" class="org.scalastyle.scalariform.PublicMethodsHaveTypeChecker" enabled="true"></check> -->
<check level="error" class="org.scalastyle.file.NewLineAtEofChecker" enabled="true"></check>
<check level="error" class="org.scalastyle.file.NoNewLineAtEofChecker" enabled="false"></check>
</scalastyle>
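The scalastyle:off / scalastyle:on escape hatch described in the header comment of this configuration looks like this in a Scala source file (illustrative snippet, not part of the commit):
// scalastyle:off
val deliberatelyUncheckedRegion = "lines between the off/on markers are skipped by the Scalastyle checks"
// scalastyle:on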