Commit 8e613014 authored by S Stephan Ewen

[FLINK-1672] [runtime] Unify Task and RuntimeEnvironment into one class.

 - This simplifies and hardens the failure handling during task startup
 - Guarantees that no actor system threads are blocked by task bootstrap or task canceling
 - Corrects some previously erroneous corner case state transitions
 - Adds simple and robust tests
Parent 1d368a4b
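
To make the second bullet concrete: in the rewritten submitTask further down, the actor only creates the Task object, registers it, and hands off to a dedicated task thread, so the actor thread never blocks on bootstrap. A minimal, hedged sketch of that pattern — all names (SubmitSketch, SketchTask) are illustrative, not Flink's actual API:

import java.util.HashMap;
import java.util.Map;

/**
 * Hedged sketch of the startup pattern this commit introduces. Only the
 * shape mirrors the rewritten submitTask below; the names are made up.
 */
class SubmitSketch {

    static final class SketchTask implements Runnable {
        private final Thread thread = new Thread(this, "task-thread");

        /** Kick off the task; all heavy bootstrap happens on the task thread. */
        void startTaskThread() {
            thread.start();
        }

        @Override
        public void run() {
            // bootstrap here: download libraries, instantiate the invokable,
            // register with the network stack - never on the caller's thread
        }
    }

    // a plain HashMap suffices when, as in the commit, all map access happens on the actor thread
    private final Map<String, SketchTask> runningTasks = new HashMap<>();

    /** Never blocks the calling (actor) thread. */
    void submitTask(String executionId) {
        SketchTask task = new SketchTask();
        SketchTask prev = runningTasks.put(executionId, task);
        if (prev != null) {
            runningTasks.put(executionId, prev); // put it back, as the commit does
            throw new IllegalStateException("already a task for id " + executionId);
        }
        task.startTaskThread();
    }
}
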
......@@ -18,6 +18,7 @@
package org.apache.flink.runtime.execution;
import akka.actor.ActorRef;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
......@@ -159,4 +160,6 @@ public interface Environment {
InputGate[] getAllInputGates();
// this should go away
ActorRef getJobManager();
}
......@@ -35,10 +35,10 @@ package org.apache.flink.runtime.execution;
* ... -> FAILED
* </pre>
*
* It is possible to enter the {@code FAILED} state from any other state.
* <p>It is possible to enter the {@code FAILED} state from any other state.</p>
*
* The states {@code FINISHED}, {@code CANCELED}, and {@code FAILED} are
* considered terminal states.
* <p>The states {@code FINISHED}, {@code CANCELED}, and {@code FAILED} are
* considered terminal states.</p>
*/
public enum ExecutionState {
......
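
For context, callers can rely on the terminal-state guarantee documented above; the TaskManager changes below use executionState.isTerminal for exactly this. A hedged Java sketch (the class name is illustrative):

import org.apache.flink.runtime.execution.ExecutionState;

/** Hedged sketch: reacting to the terminal states documented above. */
class TerminalStateSketch {

    // FINISHED, CANCELED and FAILED are terminal, and FAILED can be entered
    // from any state, so a watcher must be prepared for it at any time.
    static boolean shouldUnregister(ExecutionState state) {
        return state.isTerminal();
    }
}
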
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.execution;
import akka.actor.ActorRef;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.accumulators.AccumulatorEvent;
import org.apache.flink.runtime.broadcast.BroadcastVariableManager;
import org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.network.NetworkEnvironment;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.runtime.io.network.partition.ResultPartition;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.consumer.InputGate;
import org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
import org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider;
import org.apache.flink.runtime.memorymanager.MemoryManager;
import org.apache.flink.runtime.messages.accumulators.ReportAccumulatorResult;
import org.apache.flink.runtime.taskmanager.Task;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.AtomicBoolean;
import static com.google.common.base.Preconditions.checkElementIndex;
import static com.google.common.base.Preconditions.checkNotNull;
public class RuntimeEnvironment implements Environment, Runnable {
private static final Logger LOG = LoggerFactory.getLogger(RuntimeEnvironment.class);
private static final ThreadGroup TASK_THREADS = new ThreadGroup("Task Threads");
/** The ActorRef to the job manager */
private final ActorRef jobManager;
/** The task that owns this environment */
private final Task owner;
/** The job configuration encapsulated in the environment object. */
private final Configuration jobConfiguration;
/** The task configuration encapsulated in the environment object. */
private final Configuration taskConfiguration;
/** ClassLoader for all user code classes */
private final ClassLoader userCodeClassLoader;
/** Instance of the class to be run in this environment. */
private final AbstractInvokable invokable;
/** The memory manager of the current environment (currently the one associated with the executing TaskManager). */
private final MemoryManager memoryManager;
/** The I/O manager of the current environment (currently the one associated with the executing TaskManager). */
private final IOManager ioManager;
/** The input split provider that can be queried for new input splits. */
private final InputSplitProvider inputSplitProvider;
/** The thread executing the task in the environment. */
private Thread executingThread;
private final BroadcastVariableManager broadcastVariableManager;
private final Map<String, FutureTask<Path>> cacheCopyTasks = new HashMap<String, FutureTask<Path>>();
private final AtomicBoolean canceled = new AtomicBoolean();
private final ResultPartition[] producedPartitions;
private final ResultPartitionWriter[] writers;
private final SingleInputGate[] inputGates;
private final Map<IntermediateDataSetID, SingleInputGate> inputGatesById = new HashMap<IntermediateDataSetID, SingleInputGate>();
public RuntimeEnvironment(
ActorRef jobManager, Task owner, TaskDeploymentDescriptor tdd, ClassLoader userCodeClassLoader,
MemoryManager memoryManager, IOManager ioManager, InputSplitProvider inputSplitProvider,
BroadcastVariableManager broadcastVariableManager, NetworkEnvironment networkEnvironment) throws Exception {
this.owner = checkNotNull(owner);
this.memoryManager = checkNotNull(memoryManager);
this.ioManager = checkNotNull(ioManager);
this.inputSplitProvider = checkNotNull(inputSplitProvider);
this.jobManager = checkNotNull(jobManager);
this.broadcastVariableManager = checkNotNull(broadcastVariableManager);
try {
// Produced intermediate result partitions
final List<ResultPartitionDeploymentDescriptor> partitions = tdd.getProducedPartitions();
this.producedPartitions = new ResultPartition[partitions.size()];
this.writers = new ResultPartitionWriter[partitions.size()];
for (int i = 0; i < this.producedPartitions.length; i++) {
ResultPartitionDeploymentDescriptor desc = partitions.get(i);
ResultPartitionID partitionId = new ResultPartitionID(desc.getPartitionId(), owner.getExecutionId());
this.producedPartitions[i] = new ResultPartition(
this,
owner.getJobID(),
partitionId,
desc.getPartitionType(),
desc.getNumberOfSubpartitions(),
networkEnvironment.getPartitionManager(),
networkEnvironment.getPartitionConsumableNotifier(),
ioManager,
networkEnvironment.getDefaultIOMode());
writers[i] = new ResultPartitionWriter(this.producedPartitions[i]);
}
// Consumed intermediate result partitions
final List<InputGateDeploymentDescriptor> consumedPartitions = tdd.getInputGates();
this.inputGates = new SingleInputGate[consumedPartitions.size()];
for (int i = 0; i < inputGates.length; i++) {
inputGates[i] = SingleInputGate.create(
this, consumedPartitions.get(i), networkEnvironment);
// The input gates are organized by key for task updates/channel updates at runtime
inputGatesById.put(inputGates[i].getConsumedResultId(), inputGates[i]);
}
this.jobConfiguration = tdd.getJobConfiguration();
this.taskConfiguration = tdd.getTaskConfiguration();
// ----------------------------------------------------------------
// Invokable setup
// ----------------------------------------------------------------
// Note: This has to be done *after* the readers and writers have
// been setup, because the invokable relies on them for I/O.
// ----------------------------------------------------------------
// Load and instantiate the invokable class
this.userCodeClassLoader = checkNotNull(userCodeClassLoader);
// Class of the task to run in this environment
Class<? extends AbstractInvokable> invokableClass;
try {
final String className = tdd.getInvokableClassName();
invokableClass = Class.forName(className, true, userCodeClassLoader).asSubclass(AbstractInvokable.class);
}
catch (Throwable t) {
throw new Exception("Could not load invokable class.", t);
}
try {
this.invokable = invokableClass.newInstance();
}
catch (Throwable t) {
throw new Exception("Could not instantiate the invokable class.", t);
}
this.invokable.setEnvironment(this);
this.invokable.registerInputOutput();
}
catch (Throwable t) {
throw new Exception("Error setting up runtime environment: " + t.getMessage(), t);
}
}
/**
* Returns the task invokable instance.
*/
public AbstractInvokable getInvokable() {
return this.invokable;
}
@Override
public JobID getJobID() {
return this.owner.getJobID();
}
@Override
public JobVertexID getJobVertexId() {
return this.owner.getVertexID();
}
@Override
public void run() {
// quick fail in case the task was cancelled while the thread was being started
if (owner.isCanceledOrFailed()) {
owner.cancelingDone();
return;
}
try {
Thread.currentThread().setContextClassLoader(userCodeClassLoader);
invokable.invoke();
// Make sure we enter the catch block when the task has been canceled
if (owner.isCanceledOrFailed()) {
throw new CancelTaskException("Task has been canceled or failed");
}
// Finish the produced partitions
if (producedPartitions != null) {
for (ResultPartition partition : producedPartitions) {
if (partition != null) {
partition.finish();
}
}
}
if (owner.isCanceledOrFailed()) {
throw new CancelTaskException();
}
// Finally, switch execution state to FINISHED and report to job manager
if (!owner.markAsFinished()) {
throw new Exception("Could *not* notify job manager that the task is finished.");
}
}
catch (Throwable t) {
if (!owner.isCanceledOrFailed()) {
// Perform clean up when the task failed and has not been canceled by the user
try {
invokable.cancel();
}
catch (Throwable t2) {
LOG.error("Error while canceling the task", t2);
}
}
// if we are already set as cancelled or failed (when failure is triggered externally),
// mark that the thread is done.
if (owner.isCanceledOrFailed() || t instanceof CancelTaskException) {
owner.cancelingDone();
}
else {
// failure from inside the task thread. notify the task of the failure
owner.markFailed(t);
}
}
}
/**
* Returns the thread that is assigned to execute the user code.
*/
public Thread getExecutingThread() {
synchronized (this) {
if (executingThread == null) {
String name = owner.getTaskNameWithSubtasks();
if (LOG.isDebugEnabled()) {
name = name + " (" + owner.getExecutionId() + ")";
}
executingThread = new Thread(TASK_THREADS, this, name);
}
return executingThread;
}
}
public void cancelExecution() {
if (!canceled.compareAndSet(false, true)) {
return;
}
LOG.info("Canceling {} ({}).", owner.getTaskNameWithSubtasks(), owner.getExecutionId());
// Request user code to shut down
if (invokable != null) {
try {
invokable.cancel();
}
catch (Throwable e) {
LOG.error("Error while canceling the task.", e);
}
}
final Thread executingThread = this.executingThread;
if (executingThread != null) {
// interrupt the running thread and wait for it to die
executingThread.interrupt();
try {
executingThread.join(5000);
}
catch (InterruptedException e) {
}
if (!executingThread.isAlive()) {
return;
}
// Continuously interrupt the user thread until it changes to state CANCELED
while (executingThread != null && executingThread.isAlive()) {
LOG.warn("Task " + owner.getTaskNameWithSubtasks() + " did not react to cancelling signal. Sending repeated interrupt.");
if (LOG.isDebugEnabled()) {
StringBuilder bld = new StringBuilder("Task ").append(owner.getTaskNameWithSubtasks()).append(" is stuck in method:\n");
StackTraceElement[] stack = executingThread.getStackTrace();
for (StackTraceElement e : stack) {
bld.append(e).append('\n');
}
LOG.debug(bld.toString());
}
executingThread.interrupt();
try {
executingThread.join(1000);
}
catch (InterruptedException e) {
}
}
}
}
@Override
public ActorRef getJobManager() {
return jobManager;
}
@Override
public IOManager getIOManager() {
return ioManager;
}
@Override
public MemoryManager getMemoryManager() {
return memoryManager;
}
@Override
public BroadcastVariableManager getBroadcastVariableManager() {
return broadcastVariableManager;
}
@Override
public void reportAccumulators(Map<String, Accumulator<?, ?>> accumulators) {
AccumulatorEvent evt;
try {
evt = new AccumulatorEvent(getJobID(), accumulators);
}
catch (IOException e) {
throw new RuntimeException("Cannot serialize accumulators to send them to JobManager", e);
}
ReportAccumulatorResult accResult = new ReportAccumulatorResult(getJobID(), owner.getExecutionId(), evt);
jobManager.tell(accResult, ActorRef.noSender());
}
@Override
public ResultPartitionWriter getWriter(int index) {
checkElementIndex(index, writers.length, "Illegal environment writer request.");
return writers[index];
}
@Override
public ResultPartitionWriter[] getAllWriters() {
return writers;
}
@Override
public InputGate getInputGate(int index) {
checkElementIndex(index, inputGates.length);
return inputGates[index];
}
@Override
public SingleInputGate[] getAllInputGates() {
return inputGates;
}
public ResultPartition[] getProducedPartitions() {
return producedPartitions;
}
public SingleInputGate getInputGateById(IntermediateDataSetID id) {
return inputGatesById.get(id);
}
@Override
public Configuration getTaskConfiguration() {
return taskConfiguration;
}
@Override
public Configuration getJobConfiguration() {
return jobConfiguration;
}
@Override
public int getNumberOfSubtasks() {
return owner.getNumberOfSubtasks();
}
@Override
public int getIndexInSubtaskGroup() {
return owner.getSubtaskIndex();
}
@Override
public String getTaskName() {
return owner.getTaskName();
}
@Override
public InputSplitProvider getInputSplitProvider() {
return inputSplitProvider;
}
@Override
public String getTaskNameWithSubtasks() {
return owner.getTaskNameWithSubtasks();
}
@Override
public ClassLoader getUserClassLoader() {
return userCodeClassLoader;
}
public void addCopyTasksForCacheFile(Map<String, FutureTask<Path>> copyTasks) {
cacheCopyTasks.putAll(copyTasks);
}
public void addCopyTaskForCacheFile(String name, FutureTask<Path> copyTask) {
cacheCopyTasks.put(name, copyTask);
}
@Override
public Map<String, FutureTask<Path>> getCopyTask() {
return cacheCopyTasks;
}
}
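
The cancellation path above keeps interrupting a stuck task thread until it dies. Extracted as a standalone, hedged sketch (class and method names are illustrative; the logic is pure JDK):

/** Hedged sketch of the repeated-interrupt watchdog used in cancelExecution above. */
class InterruptWatchdogSketch {

    static void interruptUntilDead(Thread taskThread, long joinMillis) {
        taskThread.interrupt();
        while (taskThread.isAlive()) {
            try {
                taskThread.join(joinMillis);
            }
            catch (InterruptedException e) {
                // ignore, as the original code does, and keep waiting
            }
            if (taskThread.isAlive()) {
                // the task swallowed the interrupt; send another one
                taskThread.interrupt();
            }
        }
    }
}
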
......@@ -243,7 +243,7 @@ public class NetworkEnvironment {
public void registerTask(Task task) throws IOException {
final ResultPartition[] producedPartitions = task.getProducedPartitions();
final ResultPartitionWriter[] writers = task.getWriters();
final ResultPartitionWriter[] writers = task.getAllWriters();
if (writers.length != producedPartitions.length) {
throw new IllegalStateException("Unequal number of writers and partitions.");
......@@ -288,7 +288,7 @@ public class NetworkEnvironment {
}
// Setup the buffer pool for each buffer reader
final SingleInputGate[] inputGates = task.getInputGates();
final SingleInputGate[] inputGates = task.getAllInputGates();
for (SingleInputGate gate : inputGates) {
BufferPool bufferPool = null;
......@@ -329,10 +329,9 @@ public class NetworkEnvironment {
partitionManager.releasePartitionsProducedBy(executionId);
}
ResultPartitionWriter[] writers = task.getWriters();
ResultPartitionWriter[] writers = task.getAllWriters();
if (writers != null) {
for (ResultPartitionWriter writer : task.getWriters()) {
for (ResultPartitionWriter writer : writers) {
taskEventDispatcher.unregisterWriter(writer);
}
}
......@@ -344,7 +343,7 @@ public class NetworkEnvironment {
}
}
final SingleInputGate[] inputGates = task.getInputGates();
final SingleInputGate[] inputGates = task.getAllInputGates();
if (inputGates != null) {
for (SingleInputGate gate : inputGates) {
......
......@@ -18,7 +18,6 @@
package org.apache.flink.runtime.io.network.partition;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.executiongraph.IntermediateResultPartition;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManager.IOMode;
......@@ -76,9 +75,8 @@ import static com.google.common.base.Preconditions.checkState;
public class ResultPartition implements BufferPoolOwner {
private static final Logger LOG = LoggerFactory.getLogger(ResultPartition.class);
/** The owning environment. Mainly for debug purposes. */
private final Environment owner;
private final String owningTaskName;
private final JobID jobId;
......@@ -120,7 +118,7 @@ public class ResultPartition implements BufferPoolOwner {
private long totalNumberOfBytes;
public ResultPartition(
Environment owner,
String owningTaskName,
JobID jobId,
ResultPartitionID partitionId,
ResultPartitionType partitionType,
......@@ -130,7 +128,7 @@ public class ResultPartition implements BufferPoolOwner {
IOManager ioManager,
IOMode defaultIoMode) {
this.owner = checkNotNull(owner);
this.owningTaskName = checkNotNull(owningTaskName);
this.jobId = checkNotNull(jobId);
this.partitionId = checkNotNull(partitionId);
this.partitionType = checkNotNull(partitionType);
......@@ -162,7 +160,7 @@ public class ResultPartition implements BufferPoolOwner {
// Initially, partitions should be consumed once before release.
pin();
LOG.debug("{}: Initialized {}", owner.getTaskNameWithSubtasks(), this);
LOG.debug("{}: Initialized {}", owningTaskName, this);
}
/**
......@@ -281,7 +279,7 @@ public class ResultPartition implements BufferPoolOwner {
*/
public void release() {
if (isReleased.compareAndSet(false, true)) {
LOG.debug("{}: Releasing {}.", owner.getTaskNameWithSubtasks(), this);
LOG.debug("{}: Releasing {}.", owningTaskName, this);
// Release all subpartitions
for (ResultSubpartition subpartition : subpartitions) {
......
......@@ -24,7 +24,6 @@ import org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionLocation;
import org.apache.flink.runtime.event.task.AbstractEvent;
import org.apache.flink.runtime.event.task.TaskEvent;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.io.network.NetworkEnvironment;
import org.apache.flink.runtime.io.network.api.EndOfPartitionEvent;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
......@@ -101,8 +100,8 @@ public class SingleInputGate implements InputGate {
/** Lock object to guard partition requests and runtime channel updates. */
private final Object requestLock = new Object();
/** The owning environment. Mainly for debug purposes. */
private final Environment owner;
/** The name of the owning task, for logging purposes. */
private final String owningTaskName;
/**
* The ID of the consumed intermediate result. Each input gate consumes partitions of the
......@@ -153,12 +152,12 @@ public class SingleInputGate implements InputGate {
private int numberOfUninitializedChannels;
public SingleInputGate(
Environment owner,
String owningTaskName,
IntermediateDataSetID consumedResultId,
int consumedSubpartitionIndex,
int numberOfInputChannels) {
this.owner = checkNotNull(owner);
this.owningTaskName = checkNotNull(owningTaskName);
this.consumedResultId = checkNotNull(consumedResultId);
checkArgument(consumedSubpartitionIndex >= 0);
......@@ -265,7 +264,7 @@ public class SingleInputGate implements InputGate {
synchronized (requestLock) {
if (!isReleased) {
try {
LOG.debug("{}: Releasing {}.", owner.getTaskNameWithSubtasks(), this);
LOG.debug("{}: Releasing {}.", owningTaskName, this);
for (InputChannel inputChannel : inputChannels.values()) {
try {
......@@ -410,7 +409,7 @@ public class SingleInputGate implements InputGate {
* Creates an input gate and all of its input channels.
*/
public static SingleInputGate create(
Environment owner,
String owningTaskName,
InputGateDeploymentDescriptor igdd,
NetworkEnvironment networkEnvironment) {
......@@ -422,7 +421,7 @@ public class SingleInputGate implements InputGate {
final InputChannelDeploymentDescriptor[] icdd = checkNotNull(igdd.getInputChannelDeploymentDescriptors());
final SingleInputGate inputGate = new SingleInputGate(
owner, consumedResultId, consumedSubpartitionIndex, icdd.length);
owningTaskName, consumedResultId, consumedSubpartitionIndex, icdd.length);
// Create the input channels. There is one input channel for each consumed partition.
final InputChannel[] inputChannels = new InputChannel[icdd.length];
......
......@@ -1067,7 +1067,8 @@ public class RegularPactTask<S extends Function, OT> extends AbstractInvokable i
public DistributedRuntimeUDFContext createRuntimeContext(String taskName) {
Environment env = getEnvironment();
return new DistributedRuntimeUDFContext(taskName, env.getNumberOfSubtasks(),
env.getIndexInSubtaskGroup(), getUserCodeClassLoader(), getExecutionConfig(), env.getCopyTask());
env.getIndexInSubtaskGroup(), getUserCodeClassLoader(), getExecutionConfig(),
env.getDistributedCacheEntries());
}
// --------------------------------------------------------------------------------------------
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.taskmanager;
import akka.actor.ActorRef;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.accumulators.AccumulatorEvent;
import org.apache.flink.runtime.broadcast.BroadcastVariableManager;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.runtime.io.network.partition.consumer.InputGate;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider;
import org.apache.flink.runtime.memorymanager.MemoryManager;
import org.apache.flink.runtime.messages.accumulators.ReportAccumulatorResult;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.Future;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkArgument;
/**
* An implementation of the {@link Environment}.
*/
public class RuntimeEnvironment implements Environment {
private final JobID jobId;
private final JobVertexID jobVertexId;
private final ExecutionAttemptID executionId;
private final String taskName;
private final String taskNameWithSubtasks;
private final int subtaskIndex;
private final int parallelism;
private final Configuration jobConfiguration;
private final Configuration taskConfiguration;
private final ClassLoader userCodeClassLoader;
private final MemoryManager memManager;
private final IOManager ioManager;
private final BroadcastVariableManager bcVarManager;
private final InputSplitProvider splitProvider;
private final Map<String, Future<Path>> distCacheEntries;
private final ResultPartitionWriter[] writers;
private final InputGate[] inputGates;
private final ActorRef jobManagerActor;
// ------------------------------------------------------------------------
public RuntimeEnvironment(JobID jobId, JobVertexID jobVertexId, ExecutionAttemptID executionId,
String taskName, String taskNameWithSubtasks,
int subtaskIndex, int parallelism,
Configuration jobConfiguration, Configuration taskConfiguration,
ClassLoader userCodeClassLoader,
MemoryManager memManager, IOManager ioManager,
BroadcastVariableManager bcVarManager,
InputSplitProvider splitProvider,
Map<String, Future<Path>> distCacheEntries,
ResultPartitionWriter[] writers,
InputGate[] inputGates,
ActorRef jobManagerActor) {
checkArgument(parallelism > 0 && subtaskIndex >= 0 && subtaskIndex < parallelism);
this.jobId = checkNotNull(jobId);
this.jobVertexId = checkNotNull(jobVertexId);
this.executionId = checkNotNull(executionId);
this.taskName = checkNotNull(taskName);
this.taskNameWithSubtasks = checkNotNull(taskNameWithSubtasks);
this.subtaskIndex = subtaskIndex;
this.parallelism = parallelism;
this.jobConfiguration = checkNotNull(jobConfiguration);
this.taskConfiguration = checkNotNull(taskConfiguration);
this.userCodeClassLoader = checkNotNull(userCodeClassLoader);
this.memManager = checkNotNull(memManager);
this.ioManager = checkNotNull(ioManager);
this.bcVarManager = checkNotNull(bcVarManager);
this.splitProvider = checkNotNull(splitProvider);
this.distCacheEntries = checkNotNull(distCacheEntries);
this.writers = checkNotNull(writers);
this.inputGates = checkNotNull(inputGates);
this.jobManagerActor = checkNotNull(jobManagerActor);
}
// ------------------------------------------------------------------------
@Override
public JobID getJobID() {
return jobId;
}
@Override
public JobVertexID getJobVertexId() {
return jobVertexId;
}
@Override
public ExecutionAttemptID getExecutionId() {
return executionId;
}
@Override
public String getTaskName() {
return taskName;
}
@Override
public String getTaskNameWithSubtasks() {
return taskNameWithSubtasks;
}
@Override
public int getNumberOfSubtasks() {
return parallelism;
}
@Override
public int getIndexInSubtaskGroup() {
return subtaskIndex;
}
@Override
public Configuration getJobConfiguration() {
return jobConfiguration;
}
@Override
public Configuration getTaskConfiguration() {
return taskConfiguration;
}
@Override
public ClassLoader getUserClassLoader() {
return userCodeClassLoader;
}
@Override
public MemoryManager getMemoryManager() {
return memManager;
}
@Override
public IOManager getIOManager() {
return ioManager;
}
@Override
public BroadcastVariableManager getBroadcastVariableManager() {
return bcVarManager;
}
@Override
public InputSplitProvider getInputSplitProvider() {
return splitProvider;
}
@Override
public Map<String, Future<Path>> getDistributedCacheEntries() {
return distCacheEntries;
}
@Override
public ResultPartitionWriter getWriter(int index) {
return writers[index];
}
@Override
public ResultPartitionWriter[] getAllWriters() {
return writers;
}
@Override
public InputGate getInputGate(int index) {
return inputGates[index];
}
@Override
public InputGate[] getAllInputGates() {
return inputGates;
}
@Override
public void reportAccumulators(Map<String, Accumulator<?, ?>> accumulators) {
AccumulatorEvent evt;
try {
evt = new AccumulatorEvent(getJobID(), accumulators);
}
catch (IOException e) {
throw new RuntimeException("Cannot serialize accumulators to send them to JobManager", e);
}
ReportAccumulatorResult accResult = new ReportAccumulatorResult(jobId, executionId, evt);
jobManagerActor.tell(accResult, ActorRef.noSender());
}
@Override
public ActorRef getJobManager() {
return jobManagerActor;
}
}
......@@ -24,7 +24,15 @@ import org.apache.flink.runtime.instance.InstanceID
* Miscellaneous actor messages exchanged with the TaskManager.
*/
object TaskManagerMessages {
/**
* This message informs the TaskManager about a fatal error that prevents
* it from continuing.
*
* @param description The description of the problem
*/
case class FatalError(description: String, cause: Throwable)
/**
* Tells the task manager to send a heartbeat message to the job manager.
*/
......@@ -49,7 +57,7 @@ object TaskManagerMessages {
// --------------------------------------------------------------------------
// Utility messages used for notifications during TaskManager startup
// Reporting the current TaskManager stack trace
// --------------------------------------------------------------------------
/**
......
......@@ -67,12 +67,12 @@ object TaskMessages {
extends TaskMessage
/**
* Unregister the task identified by [[executionID]] from the TaskManager.
* Sent to the TaskManager by futures and callbacks.
* Notifies the TaskManager that the task has reached its final state,
* either FINISHED, CANCELED, or FAILED.
*
* @param executionID The task's execution attempt ID.
*/
case class UnregisterTask(executionID: ExecutionAttemptID)
case class TaskInFinalState(executionID: ExecutionAttemptID)
extends TaskMessage
......
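
Java code can construct and send the new message through the usual Scala interop, just as the Java tests in this commit do for SubmitTask and UpdateTaskExecutionState. A hedged sketch (the class and method names are illustrative):

import akka.actor.ActorRef;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.messages.TaskMessages;

/** Hedged sketch: reporting a terminal state to the TaskManager actor from Java. */
class FinalStateNotifierSketch {

    static void notifyFinalState(ActorRef taskManager, ExecutionAttemptID executionId) {
        taskManager.tell(new TaskMessages.TaskInFinalState(executionId), ActorRef.noSender());
    }
}
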
......@@ -21,7 +21,7 @@ package org.apache.flink.runtime.taskmanager
import java.io.{File, IOException}
import java.net.{InetAddress, InetSocketAddress}
import java.util
import java.util.concurrent.{TimeUnit, FutureTask}
import java.util.concurrent.TimeUnit
import java.lang.reflect.Method
import java.lang.management.{GarbageCollectorMXBean, ManagementFactory, MemoryMXBean}
......@@ -36,16 +36,13 @@ import com.codahale.metrics.jvm.{MemoryUsageGaugeSet, GarbageCollectorMetricSet}
import com.fasterxml.jackson.databind.ObjectMapper
import grizzled.slf4j.Logger
import org.apache.flink.api.common.cache.DistributedCache
import org.apache.flink.configuration._
import org.apache.flink.core.fs.Path
import org.apache.flink.runtime.{ActorSynchronousLogging, ActorLogMessages}
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.flink.runtime.blob.{BlobService, BlobCache}
import org.apache.flink.runtime.broadcast.BroadcastVariableManager
import org.apache.flink.runtime.deployment.{InputChannelDeploymentDescriptor, TaskDeploymentDescriptor}
import org.apache.flink.runtime.execution.librarycache.{BlobLibraryCacheManager, FallbackLibraryCacheManager, LibraryCacheManager}
import org.apache.flink.runtime.execution.{CancelTaskException, ExecutionState, RuntimeEnvironment}
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID
import org.apache.flink.runtime.filecache.FileCache
import org.apache.flink.runtime.instance.{HardwareDescription, InstanceConnectionInfo, InstanceID}
......@@ -54,7 +51,6 @@ import org.apache.flink.runtime.io.disk.iomanager.{IOManager, IOManagerAsync}
import org.apache.flink.runtime.io.network.NetworkEnvironment
import org.apache.flink.runtime.io.network.netty.NettyConfig
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID
import org.apache.flink.runtime.jobgraph.tasks.{OperatorStateCarrier,BarrierTransceiver}
import org.apache.flink.runtime.jobmanager.JobManager
import org.apache.flink.runtime.memorymanager.{MemoryManager, DefaultMemoryManager}
import org.apache.flink.runtime.messages.CheckpointingMessages.{CheckpointingMessage, BarrierReq}
......@@ -67,9 +63,6 @@ import org.apache.flink.runtime.process.ProcessReaper
import org.apache.flink.runtime.security.SecurityUtils
import org.apache.flink.runtime.security.SecurityUtils.FlinkSecuredRunner
import org.apache.flink.runtime.util.{MathUtils, EnvironmentInformation}
import org.apache.flink.util.ExceptionUtils
import org.slf4j.LoggerFactory
import scala.concurrent._
import scala.concurrent.duration._
......@@ -136,13 +129,13 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
protected val resources = HardwareDescription.extractFromSystem(memoryManager.getMemorySize)
/** Registry of all tasks currently executed by this TaskManager */
protected val runningTasks = new util.concurrent.ConcurrentHashMap[ExecutionAttemptID, Task]()
protected val runningTasks = new java.util.HashMap[ExecutionAttemptID, Task]()
/** Handler for shared broadcast variables (shared between multiple Tasks) */
protected val bcVarManager = new BroadcastVariableManager()
/** Handler for distributed files cached by this TaskManager */
protected val fileCache = new FileCache()
protected val fileCache = new FileCache(config.configuration)
/** Registry of metrics periodically transmitted to the JobManager */
private val metricRegistry = TaskManager.createMetricsRegistry()
......@@ -282,6 +275,9 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
case Disconnect(msg) =>
handleJobManagerDisconnect(sender(), "JobManager requested disconnect: " + msg)
case FatalError(message, cause) =>
killTaskManagerFatal(message, cause)
}
/**
......@@ -344,12 +340,13 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
// state transition
case updateMsg @ UpdateTaskExecutionState(taskExecutionState: TaskExecutionState) =>
// we receive these from our tasks and forward them to the JobManager
currentJobManager foreach {
jobManager => {
val futureResponse = (jobManager ? updateMsg)(askTimeout)
val executionID = taskExecutionState.getID
val executionState = taskExecutionState.getExecutionState
futureResponse.mapTo[Boolean].onComplete {
// IMPORTANT: In the future callback, we cannot directly modify state
......@@ -359,21 +356,16 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
self ! FailTask(executionID,
new Exception("Task has been cancelled on the JobManager."))
}
if (!result || executionState.isTerminal) {
self ! UnregisterTask(executionID)
}
case Failure(t) =>
self ! FailTask(executionID, new Exception(
"Failed to send ExecutionStateChange notification to JobManager"))
self ! UnregisterTask(executionID)
}(context.dispatcher)
}
}
// removes the task from the TaskManager and frees all its resources
case UnregisterTask(executionID) =>
case TaskInFinalState(executionID) =>
unregisterTaskAndNotifyFinalState(executionID)
// starts a new task on the TaskManager
......@@ -383,35 +375,22 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
// marks a task as failed for an external reason
// external reasons are reasons other than the task code itself throwing an exception
case FailTask(executionID, cause) =>
Option(runningTasks.get(executionID)) match {
case Some(task) =>
// execute failing operation concurrently
implicit val executor = context.dispatcher
Future {
task.failExternally(cause)
}.onFailure{
case t: Throwable => log.error(s"Could not fail task ${task} externally.", t)
}
case None =>
val task = runningTasks.get(executionID)
if (task != null) {
task.failExternally(cause)
} else {
log.debug(s"Cannot find task to fail for execution ${executionID})")
}
// cancels a task
case CancelTask(executionID) =>
Option(runningTasks.get(executionID)) match {
case Some(task) =>
// execute cancel operation concurrently
implicit val executor = context.dispatcher
Future {
task.cancelExecution()
}.onFailure{
case t: Throwable => log.error("Could not cancel task " + task, t)
}
sender ! new TaskOperationResult(executionID, true)
case None =>
sender ! new TaskOperationResult(executionID, false,
val task = runningTasks.get(executionID)
if (task != null) {
task.cancelExecution()
sender ! new TaskOperationResult(executionID, true)
} else {
log.debug(s"Cannot find task to cancel for execution ${executionID})")
sender ! new TaskOperationResult(executionID, false,
"No task with that execution ID was found.")
}
}
......@@ -430,24 +409,11 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
log.debug(s"[FT-TaskManager] Barrier $checkpointID request received " +
s"for attempt $attemptID.")
Option(runningTasks.get(attemptID)) match {
case Some(i) =>
if (i.getExecutionState == ExecutionState.RUNNING) {
i.getEnvironment.getInvokable match {
case barrierTransceiver: BarrierTransceiver =>
new Thread(new Runnable {
override def run(): Unit =
barrierTransceiver.broadcastBarrierFromSource(checkpointID)
}).start()
case _ => log.error("Taskmanager received a checkpoint request for " +
s"non-checkpointing task $attemptID.")
}
}
case None =>
// may always happen in case of canceled/finished tasks
log.debug(s"Taskmanager received a checkpoint request for unknown task $attemptID.")
val task = runningTasks.get(attemptID)
if (task != null) {
task.triggerCheckpointBarrier(checkpointID)
} else {
log.debug(s"Taskmanager received a checkpoint request for unknown task $attemptID.")
}
// unknown checkpoint message
......@@ -770,8 +736,7 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
}
}
}
// --------------------------------------------------------------------------
// Task Operations
// --------------------------------------------------------------------------
......@@ -784,130 +749,46 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
* @param tdd TaskDeploymentDescriptor describing the task to be executed on this [[TaskManager]]
*/
private def submitTask(tdd: TaskDeploymentDescriptor): Unit = {
val slot = tdd.getTargetSlotNumber
if (!isConnected) {
sender ! Failure(
new IllegalStateException("TaskManager is not associated with a JobManager.")
)
} else if (slot < 0 || slot >= numberOfSlots) {
sender ! Failure(new Exception(s"Target slot $slot does not exist on TaskManager."))
} else {
sender ! Acknowledge
Future {
initializeTask(tdd)
}(context.dispatcher)
}
}
/** Sets up a [[org.apache.flink.runtime.execution.RuntimeEnvironment]] for the task and starts
* its execution in a separate thread.
*
* @param tdd TaskDeploymentDescriptor describing the task to be executed on this [[TaskManager]]
*/
private def initializeTask(tdd: TaskDeploymentDescriptor): Unit = {
val jobID = tdd.getJobID
val vertexID = tdd.getVertexID
val executionID = tdd.getExecutionId
val taskIndex = tdd.getIndexInSubtaskGroup
val numSubtasks = tdd.getNumberOfSubtasks
var startRegisteringTask = 0L
var task: Task = null
try {
val userCodeClassLoader = libraryCacheManager match {
case Some(manager) =>
if (log.isDebugEnabled) {
startRegisteringTask = System.currentTimeMillis()
}
// triggers the download of all missing jar files from the job manager
manager.registerTask(jobID, executionID, tdd.getRequiredJarFiles)
if (log.isDebugEnabled) {
log.debug(s"Register task $executionID at library cache manager " +
s"took ${(System.currentTimeMillis() - startRegisteringTask) / 1000.0}s")
}
manager.getClassLoader(jobID)
case None => throw new IllegalStateException("There is no valid library cache manager.")
}
if (userCodeClassLoader == null) {
throw new RuntimeException("No user code Classloader available.")
}
task = new Task(jobID, vertexID, taskIndex, numSubtasks, executionID,
tdd.getTaskName, self)
Option(runningTasks.put(executionID, task)) match {
case Some(_) => throw new RuntimeException(
s"TaskManager contains already a task with executionID $executionID.")
// grab some handles and sanity check on the fly
val jobManagerActor = currentJobManager match {
case Some(jm) => jm
case None =>
throw new IllegalStateException("TaskManager is not associated with a JobManager.")
}
val env = currentJobManager match {
case Some(jobManager) =>
val splitProvider = new TaskInputSplitProvider(jobManager, jobID, vertexID,
executionID, userCodeClassLoader, askTimeout)
new RuntimeEnvironment(jobManager, task, tdd, userCodeClassLoader,
memoryManager, ioManager, splitProvider, bcVarManager, network)
case None => throw new IllegalStateException(
"TaskManager has not yet been registered at a JobManager.")
}
task.setEnvironment(env)
//inject operator state
if (tdd.getOperatorStates != null) {
task.getEnvironment.getInvokable match {
case opStateCarrier: OperatorStateCarrier =>
opStateCarrier.injectState(tdd.getOperatorStates)
}
val libCache = libraryCacheManager match {
case Some(manager) => manager
case None => throw new IllegalStateException("There is no valid library cache manager.")
}
// register the task with the network stack and profiles
log.info(s"Register task $task.")
network.registerTask(task)
val cpTasks = new util.HashMap[String, FutureTask[Path]]()
for (entry <- DistributedCache.readFileInfoFromConfig(tdd.getJobConfiguration).asScala) {
val cp = fileCache.createTmpFile(entry.getKey, entry.getValue, jobID)
cpTasks.put(entry.getKey, cp)
val slot = tdd.getTargetSlotNumber
if (slot < 0 || slot >= numberOfSlots) {
throw new IllegalArgumentException(s"Target slot $slot does not exist on TaskManager.")
}
env.addCopyTasksForCacheFile(cpTasks)
if (!task.startExecution()) {
throw new RuntimeException("Cannot start task. Task was canceled or failed.")
// create the task. this does not grab any TaskManager resources or download
// any libraries - the operation does not block
val execId = tdd.getExecutionId
val task = new Task(tdd, memoryManager, ioManager, network, bcVarManager,
self, jobManagerActor, config.timeout, libCache, fileCache)
// add the task to the map
val prevTask = runningTasks.put(execId, task)
if (prevTask != null) {
// already have a task for that ID, put it back and report an error
runningTasks.put(execId, prevTask)
throw new IllegalStateException("TaskManager already contains a task for id " + execId)
}
self ! UpdateTaskExecutionState(
new TaskExecutionState(jobID, executionID, ExecutionState.RUNNING)
)
} catch {
case t: Throwable =>
if (!t.isInstanceOf[CancelTaskException]) {
log.error("Could not instantiate task with execution ID " + executionID, t)
}
try {
if (task != null) {
task.failExternally(t)
removeAllTaskResources(task)
}
libraryCacheManager foreach { _.unregisterTask(jobID, executionID) }
} catch {
case t: Throwable => log.error("Error during cleanup of task deployment.", t)
}
self ! UpdateTaskExecutionState(
new TaskExecutionState(jobID, executionID, ExecutionState.FAILED, t)
)
// all good, we kick off the task, which performs its own initialization
task.startTaskThread()
sender ! Acknowledge
}
catch {
case t: Throwable =>
log.error("SubmitTask failed", t)
sender ! Failure(t)
}
}
......@@ -927,19 +808,20 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
val errors: Seq[String] = partitionInfos.flatMap { info =>
val (resultID, partitionInfo) = info
val reader = task.getEnvironment.getInputGateById(resultID)
val reader = task.getInputGateById(resultID)
if (reader != null) {
Future {
try {
reader.updateInputChannel(partitionInfo)
} catch {
}
catch {
case t: Throwable =>
log.error(s"Could not update input data location for task " +
s"${task.getTaskName}. Trying to fail task.", t)
try {
task.markFailed(t)
task.failExternally(t)
}
catch {
case t: Throwable =>
......@@ -977,20 +859,20 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
private def cancelAndClearEverything(cause: Throwable) {
if (runningTasks.size > 0) {
log.info("Cancelling all computations and discarding all cached data.")
for (t <- runningTasks.values().asScala) {
t.failExternally(cause)
unregisterTaskAndNotifyFinalState(t.getExecutionId)
}
runningTasks.clear()
}
}
private def unregisterTaskAndNotifyFinalState(executionID: ExecutionAttemptID): Unit = {
Option(runningTasks.remove(executionID)) match {
case Some(task) =>
// mark the task as failed if it is not yet in a final state
val task = runningTasks.remove(executionID)
if (task != null) {
// the task must be in a terminal state
if (!task.getExecutionState.isTerminal) {
try {
task.failExternally(new Exception("Task is being removed from TaskManager"))
......@@ -999,66 +881,15 @@ extends Actor with ActorLogMessages with ActorSynchronousLogging {
}
}
log.info(s"Unregister task with execution ID $executionID.")
removeAllTaskResources(task)
libraryCacheManager foreach { _.unregisterTask(task.getJobID, executionID) }
log.info(s"Updating FINAL execution state of ${task.getTaskName} " +
s"(${task.getExecutionId}) to ${task.getExecutionState}.")
log.info(s"Unregistering task and sending final execution state " +
s"${task.getExecutionState} to JobManager for task ${task.getTaskName} " +
s"(${task.getExecutionId})")
self ! UpdateTaskExecutionState(new TaskExecutionState(
task.getJobID, task.getExecutionId, task.getExecutionState, task.getFailureCause))
case None =>
log.debug(s"Cannot find task with ID $executionID to unregister.")
}
}
/**
* This method cleans up the resources of a task in the distributed cache,
* network stack and the memory manager.
*
* If the cleanup in the network stack or memory manager fails, this is considered
* a fatal problem (critical resource leak) and causes the TaskManager to quit.
* A TaskManager JVM restart is the safest way to recover from that error.
*
* @param task The Task whose resources should be cleared.
*/
private def removeAllTaskResources(task: Task): Unit = {
// release the critical things first, and fail fatally if it does not work
// this releases all task resources, like buffer pools and intermediate result
// partitions being built. If this fails, the TaskManager is in serious trouble,
// as this is a massive resource leak. We kill the TaskManager in that case,
// to recover through a clean JVM start
try {
network.unregisterTask(task)
} catch {
case t: Throwable =>
killTaskManagerFatal("Failed to unregister task resources from network stack", t)
}
// safety net to release all the task's memory
try {
task.unregisterMemoryManager(memoryManager)
} catch {
case t: Throwable =>
killTaskManagerFatal("Failed to unregister task memory from memory manager", t)
}
// release temp files from the distributed cache
if (task.getEnvironment != null) {
try {
for (entry <- DistributedCache.readFileInfoFromConfig(
task.getEnvironment.getJobConfiguration).asScala) {
fileCache.deleteTmpFile(entry.getKey, entry.getValue, task.getJobID)
}
} catch {
// this is pretty unpleasant, but not a reason to give up immediately
case e: Exception => log.error(
"Error cleaning up local temp files from the distributed cache.", e)
}
else {
log.error(s"Cannot find task with ID $executionID to unregister.")
}
}
......
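
The scaladoc above states that a failed cleanup is treated as a fatal resource leak. The TaskManager handles the new FatalError message (see the handler added near the top of this file) by calling killTaskManagerFatal; a hedged Java-interop sketch of how an external Java component, such as the new Task, could escalate a critical failure to the actor (the class and method names are illustrative):

import akka.actor.ActorRef;
import org.apache.flink.runtime.messages.TaskManagerMessages;

/** Hedged sketch: escalating a critical cleanup failure to the TaskManager actor. */
class FatalErrorReporterSketch {

    static void reportFatal(ActorRef taskManager, Throwable cause) {
        taskManager.tell(
            new TaskManagerMessages.FatalError(
                "Failed to unregister task resources from network stack", cause),
            ActorRef.noSender());
    }
}
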
......@@ -19,8 +19,8 @@
package org.apache.flink.runtime.io.network.partition.consumer;
import com.google.common.collect.Lists;
import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.network.TaskEventDispatcher;
import org.apache.flink.runtime.io.network.buffer.BufferPool;
......@@ -36,6 +36,7 @@ import org.apache.flink.runtime.io.network.util.TestPartitionProducer;
import org.apache.flink.runtime.io.network.util.TestProducerSource;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.junit.Test;
import java.util.Collections;
......@@ -93,7 +94,7 @@ public class LocalInputChannelTest {
partitionIds[i] = new ResultPartitionID();
final ResultPartition partition = new ResultPartition(
mock(Environment.class),
"Test Name",
jobId,
partitionIds[i],
ResultPartitionType.PIPELINED,
......@@ -222,7 +223,7 @@ public class LocalInputChannelTest {
checkArgument(numberOfExpectedBuffersPerChannel >= 1);
this.inputGate = new SingleInputGate(
mock(Environment.class),
"Test Name",
new IntermediateDataSetID(),
subpartitionIndex,
numberOfInputChannels);
......
......@@ -22,8 +22,6 @@ import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionLocation;
import org.apache.flink.runtime.event.task.TaskEvent;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.execution.RuntimeEnvironment;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.network.ConnectionManager;
import org.apache.flink.runtime.io.network.TaskEventDispatcher;
......@@ -61,7 +59,7 @@ public class SingleInputGateTest {
public void testBasicGetNextLogic() throws Exception {
// Setup
final SingleInputGate inputGate = new SingleInputGate(
mock(Environment.class), new IntermediateDataSetID(), 0, 2);
"Test Task Name", new IntermediateDataSetID(), 0, 2);
final TestInputChannel[] inputChannels = new TestInputChannel[]{
new TestInputChannel(inputGate, 0),
......@@ -107,7 +105,7 @@ public class SingleInputGateTest {
// Setup reader with one local and one unknown input channel
final IntermediateDataSetID resultId = new IntermediateDataSetID();
final SingleInputGate inputGate = new SingleInputGate(mock(Environment.class), resultId, 0, 2);
final SingleInputGate inputGate = new SingleInputGate("Test Task Name", resultId, 0, 2);
final BufferPool bufferPool = mock(BufferPool.class);
when(bufferPool.getNumberOfRequiredMemorySegments()).thenReturn(2);
......
......@@ -18,7 +18,6 @@
package org.apache.flink.runtime.io.network.partition.consumer;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
......@@ -30,7 +29,6 @@ import java.util.List;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkElementIndex;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
/**
......@@ -50,7 +48,7 @@ public class TestSingleInputGate {
checkArgument(numberOfInputChannels >= 1);
this.inputGate = spy(new SingleInputGate(
mock(Environment.class), new IntermediateDataSetID(), 0, numberOfInputChannels));
"Test Task Name", new IntermediateDataSetID(), 0, numberOfInputChannels));
this.inputChannels = new TestInputChannel[numberOfInputChannels];
......
......@@ -18,14 +18,13 @@
package org.apache.flink.runtime.io.network.partition.consumer;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
public class UnionInputGateTest {
......@@ -39,9 +38,9 @@ public class UnionInputGateTest {
@Test(timeout = 120 * 1000)
public void testBasicGetNextLogic() throws Exception {
// Setup
final Environment env = mock(Environment.class);
final SingleInputGate ig1 = new SingleInputGate(env, new IntermediateDataSetID(), 0, 3);
final SingleInputGate ig2 = new SingleInputGate(env, new IntermediateDataSetID(), 0, 5);
final String testTaskName = "Test Task";
final SingleInputGate ig1 = new SingleInputGate(testTaskName, new IntermediateDataSetID(), 0, 3);
final SingleInputGate ig2 = new SingleInputGate(testTaskName, new IntermediateDataSetID(), 0, 5);
final UnionInputGate union = new UnionInputGate(new SingleInputGate[]{ig1, ig2});
......
......@@ -18,6 +18,7 @@
package org.apache.flink.runtime.operators.testutils;
import akka.actor.ActorRef;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
......@@ -261,4 +262,9 @@ public class MockEnvironment implements Environment {
public void reportAccumulators(Map<String, Accumulator<?, ?>> accumulators) {
// discard, this is only for testing
}
@Override
public ActorRef getJobManager() {
return ActorRef.noSender();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.taskmanager;
import akka.actor.UntypedActor;
import java.util.concurrent.BlockingQueue;
/**
* Actor for testing that simply puts all its messages into a
* blocking queue.
*/
class ForwardingActor extends UntypedActor {
private final BlockingQueue<Object> queue;
public ForwardingActor(BlockingQueue<Object> queue) {
this.queue = queue;
}
@Override
public void onReceive(Object message) {
queue.add(message);
}
}
......@@ -21,6 +21,8 @@ package org.apache.flink.runtime.taskmanager;
import static org.junit.Assert.*;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
......@@ -84,4 +86,35 @@ public class TaskExecutionStateTest {
fail(e.getMessage());
}
}
@Test
public void handleNonSerializableException() {
try {
@SuppressWarnings({"ThrowableInstanceNeverThrown", "serial"})
Exception hostile = new Exception() {
// should be non-serializable, because it contains the outer class reference
@Override
public String getMessage() {
throw new RuntimeException("Cannot get Message");
}
@Override
public void printStackTrace(PrintStream s) {
throw new RuntimeException("Cannot print");
}
@Override
public void printStackTrace(PrintWriter s) {
throw new RuntimeException("Cannot print");
}
};
new TaskExecutionState(new JobID(), new ExecutionAttemptID(), ExecutionState.FAILED, hostile);
}
catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
}
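
The test above constructs a throwable whose getMessage and printStackTrace methods throw, yet TaskExecutionState must still be constructible and shippable. One defensive approach — a hedged sketch, not necessarily Flink's exact implementation — is to serialize the cause eagerly and fall back to a minimal description when that fails:

import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.nio.charset.StandardCharsets;

/** Hedged sketch: surviving a hostile, non-serializable failure cause. */
class CauseSerializationSketch {

    static byte[] trySerialize(Throwable cause) {
        try {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(bos);
            oos.writeObject(cause);
            oos.close();
            return bos.toByteArray();
        }
        catch (Throwable t) {
            // getClass() is safe even when getMessage() and printStackTrace() throw
            String summary = "Unserializable failure cause: " + cause.getClass().getName();
            return summary.getBytes(StandardCharsets.UTF_8);
        }
    }
}
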
......@@ -29,6 +29,7 @@ import akka.testkit.JavaTestKit;
import akka.util.Timeout;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.blob.BlobKey;
import org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
......@@ -56,11 +57,14 @@ import org.apache.flink.runtime.messages.TaskMessages.SubmitTask;
import org.apache.flink.runtime.messages.TaskMessages.TaskOperationResult;
import org.apache.flink.runtime.testingUtils.TestingTaskManager;
import org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages;
import org.apache.flink.runtime.testingUtils.TestingUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Option;
import scala.concurrent.Await;
import scala.concurrent.Future;
......@@ -75,42 +79,61 @@ import java.util.Set;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
@SuppressWarnings("serial")
public class TaskManagerTest {
private static ActorSystem system;
private static Timeout timeout = new Timeout(1, TimeUnit.MINUTES);
private static final Logger LOG = LoggerFactory.getLogger(TaskManagerTest.class);
private static final Timeout timeout = new Timeout(1, TimeUnit.MINUTES);
private final FiniteDuration d = new FiniteDuration(20, TimeUnit.SECONDS);
private static final FiniteDuration d = new FiniteDuration(20, TimeUnit.SECONDS);
private static ActorSystem system;
@BeforeClass
public static void setup() {
system = ActorSystem.create("TestActorSystem", TestingUtils.testConfig());
system = AkkaUtils.createLocalActorSystem(new Configuration());
}
@AfterClass
public static void teardown() {
JavaTestKit.shutdownActorSystem(system);
system = null;
}
@Test
public void testSetupTaskManager() {
public void testSubmitAndExecuteTask() {
LOG.info( "--------------------------------------------------------------------\n" +
" Starting testSubmitAndExecuteTask() \n" +
"--------------------------------------------------------------------");
new JavaTestKit(system){{
ActorRef jobManager = null;
ActorRef taskManager = null;
try {
jobManager = system.actorOf(Props.create(SimpleJobManager.class));
taskManager = createTaskManager(jobManager);
taskManager = createTaskManager(getTestActor(), false);
final ActorRef tmClosure = taskManager;
// handle the registration
new Within(d) {
@Override
protected void run() {
expectMsgClass(RegistrationMessages.RegisterTaskManager.class);
final InstanceID iid = new InstanceID();
assertEquals(tmClosure, getLastSender());
tmClosure.tell(new RegistrationMessages.AcknowledgeRegistration(
getTestActor(), iid, 12345), getTestActor());
}
};
JobID jid = new JobID();
JobVertexID vid = new JobVertexID();
final JobID jid = new JobID();
final JobVertexID vid = new JobVertexID();
final ExecutionAttemptID eid = new ExecutionAttemptID();
final TaskDeploymentDescriptor tdd = new TaskDeploymentDescriptor(jid, vid, eid, "TestTask", 2, 7,
......@@ -119,13 +142,54 @@ public class TaskManagerTest {
Collections.<InputGateDeploymentDescriptor>emptyList(),
new ArrayList<BlobKey>(), 0);
final ActorRef tmClosure = taskManager;
new Within(d) {
@Override
protected void run() {
tmClosure.tell(new SubmitTask(tdd), getRef());
expectMsgEquals(Messages.getAcknowledge());
// TaskManager should acknowledge the submission
// heartbeats may be interleaved
long deadline = System.currentTimeMillis() + 10000;
do {
Object message = receiveOne(d);
if (message == Messages.getAcknowledge()) {
break;
}
} while (System.currentTimeMillis() < deadline);
// task should have switched to running
Object toRunning = new TaskMessages.UpdateTaskExecutionState(
new TaskExecutionState(jid, eid, ExecutionState.RUNNING));
// task should have switched to finished
Object toFinished = new TaskMessages.UpdateTaskExecutionState(
new TaskExecutionState(jid, eid, ExecutionState.FINISHED));
deadline = System.currentTimeMillis() + 10000;
do {
Object message = receiveOne(d);
if (message.equals(toRunning)) {
break;
}
else if (!(message instanceof TaskManagerMessages.Heartbeat)) {
fail("Unexpected message: " + message);
}
} while (System.currentTimeMillis() < deadline);
deadline = System.currentTimeMillis() + 10000;
do {
Object message = receiveOne(d);
if (message.equals(toFinished)) {
break;
}
else if (!(message instanceof TaskManagerMessages.Heartbeat)) {
fail("Unexpected message: " + message);
}
} while (System.currentTimeMillis() < deadline);
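// the two loops above tolerate interleaved heartbeats and only fail on
// genuinely unexpected messages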
}
};
}
......@@ -138,22 +202,24 @@ public class TaskManagerTest {
if (taskManager != null) {
taskManager.tell(Kill.getInstance(), ActorRef.noSender());
}
if (jobManager != null) {
jobManager.tell(Kill.getInstance(), ActorRef.noSender());
}
}
}};
}
@Test
public void testJobSubmissionAndCanceling() {
LOG.info( "--------------------------------------------------------------------\n" +
" Starting testJobSubmissionAndCanceling() \n" +
"--------------------------------------------------------------------");
new JavaTestKit(system){{
ActorRef jobManager = null;
ActorRef taskManager = null;
try {
jobManager = system.actorOf(Props.create(SimpleJobManager.class));
taskManager = createTaskManager(jobManager);
taskManager = createTaskManager(jobManager, true);
final JobID jid1 = new JobID();
final JobID jid2 = new JobID();
......@@ -274,6 +340,11 @@ public class TaskManagerTest {
@Test
public void testGateChannelEdgeMismatch() {
LOG.info( "--------------------------------------------------------------------\n" +
" Starting testGateChannelEdgeMismatch() \n" +
"--------------------------------------------------------------------");
new JavaTestKit(system){{
ActorRef jobManager = null;
......@@ -281,7 +352,7 @@ public class TaskManagerTest {
try {
jobManager = system.actorOf(Props.create(SimpleJobManager.class));
taskManager = createTaskManager(jobManager);
taskManager = createTaskManager(jobManager, true);
final ActorRef tm = taskManager;
final JobID jid = new JobID();
......@@ -353,6 +424,11 @@ public class TaskManagerTest {
@Test
public void testRunJobWithForwardChannel() {
LOG.info( "--------------------------------------------------------------------\n" +
" Starting testRunJobWithForwardChannel() \n" +
"--------------------------------------------------------------------");
new JavaTestKit(system){{
ActorRef jobManager = null;
......@@ -368,7 +444,7 @@ public class TaskManagerTest {
jobManager = system.actorOf(Props.create(new SimpleLookupJobManagerCreator()));
taskManager = createTaskManager(jobManager);
taskManager = createTaskManager(jobManager, true);
final ActorRef tm = taskManager;
IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
......@@ -470,6 +546,10 @@ public class TaskManagerTest {
@Test
public void testCancellingDependentAndStateUpdateFails() {
LOG.info( "--------------------------------------------------------------------\n" +
" Starting testCancellingDependentAndStateUpdateFails() \n" +
"--------------------------------------------------------------------");
// this test creates two tasks: the sender sends data and fails to send the
// state update back to the job manager;
// the second task blocks until it is canceled
......@@ -491,7 +571,7 @@ public class TaskManagerTest {
new SimpleLookupFailingUpdateJobManagerCreator(eid2)
)
);
taskManager = createTaskManager(jobManager);
taskManager = createTaskManager(jobManager, true);
final ActorRef tm = taskManager;
IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
......@@ -676,7 +756,7 @@ public class TaskManagerTest {
}
}
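// Creates a TaskManager that reports to the given JobManager actor.
// If 'waitForRegistration' is true, this call blocks until the TaskManager
// has registered; tests that drive the registration handshake themselves
// (like testSubmitAndExecuteTask) pass 'false'.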
public static ActorRef createTaskManager(ActorRef jobManager) {
public static ActorRef createTaskManager(ActorRef jobManager, boolean waitForRegistration) {
ActorRef taskManager = null;
try {
Configuration cfg = new Configuration();
......@@ -695,16 +775,18 @@ public class TaskManagerTest {
fail("Could not create test TaskManager: " + e.getMessage());
}
Future<Object> response = Patterns.ask(taskManager,
TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), timeout);
try {
FiniteDuration d = new FiniteDuration(100, TimeUnit.SECONDS);
Await.ready(response, d);
}
catch (Exception e) {
e.printStackTrace();
fail("Exception while waiting for the task manager registration: " + e.getMessage());
if (waitForRegistration) {
Future<Object> response = Patterns.ask(taskManager,
TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), timeout);
try {
FiniteDuration d = new FiniteDuration(100, TimeUnit.SECONDS);
Await.ready(response, d);
}
catch (Exception e) {
e.printStackTrace();
fail("Exception while waiting for the task manager registration: " + e.getMessage());
}
}
return taskManager;
......
......@@ -27,7 +27,7 @@ import org.apache.flink.runtime.io.disk.iomanager.IOManager
import org.apache.flink.runtime.io.network.NetworkEnvironment
import org.apache.flink.runtime.memorymanager.DefaultMemoryManager
import org.apache.flink.runtime.messages.Messages.Disconnect
import org.apache.flink.runtime.messages.TaskMessages.{UpdateTaskExecutionState, UnregisterTask}
import org.apache.flink.runtime.messages.TaskMessages.{TaskInFinalState, UpdateTaskExecutionState}
import org.apache.flink.runtime.taskmanager.{TaskManagerConfiguration, TaskManager}
import org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenJobRemoved
import org.apache.flink.runtime.testingUtils.TestingMessages.DisableDisconnect
......@@ -95,8 +95,8 @@ class TestingTaskManager(config: TaskManagerConfiguration,
}
}
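// tasks now announce TaskInFinalState (formerly UnregisterTask) once they
// reach a terminal state; forward it and wake up any actors waiting for removal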
case UnregisterTask(executionID) =>
super.receiveWithLogMessages(UnregisterTask(executionID))
case TaskInFinalState(executionID) =>
super.receiveWithLogMessages(TaskInFinalState(executionID))
waitForRemoval.remove(executionID) match {
case Some(actors) => for(actor <- actors) actor ! true
case None =>
......