diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index 89d7a62fe9aca3a71ad34b976a186a80174bfd5e..6a8b15a6b60a2e5635dc78fc877f0c8da9a2a998 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -118,6 +118,10 @@ endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}")
+if(WITH_DISTRIBUTE)
+ add_definitions(-DPADDLE_WITH_DISTRIBUTE)
+endif()
+
if(WITH_GOLANG)
# we need to symlink Paddle directory into GOPATH. If we
# don't do it and we have code that depends on Paddle, go
diff --git a/doc/survey/dynamic_graph.md b/doc/survey/dynamic_graph.md
new file mode 100644
index 0000000000000000000000000000000000000000..553a9dbe15fcdc67fc10ca479ce080c384f012e8
--- /dev/null
+++ b/doc/survey/dynamic_graph.md
@@ -0,0 +1,378 @@
+# Automatic Differentiation with the Tape
+
+## Automatic Differentiation
+
+A key challenge in the field of deep learning is to automatically derive the backward pass from the forward pass described algorithmically by researchers. Such a derivation, or a transformation of the forward pass program, has been long studied before the recent prosperity of deep learning in the field known as [automatic differentiation](https://arxiv.org/pdf/1502.05767.pdf).
+
+## The Tape
+
+Given the forward pass program (usually in Python in practice), there are two strategies to derive the backward pass:
+
+1. from the forward pass program itself, or
+1. from the execution trace of the forward pass program, which is often known as the *tape*.
+
+This article surveys systems that follow the latter strategy.
+
+## Dynamic Network
+
+When we train a deep learning model, the tape changes every iteration as the input data change, so we have to re-derive the backward pass every iteration. This is known as *dynamic network*.
+
+Deep learning systems that utilize the idea of dynamic network gained their popularities in recent years. This article surveys two representative systems: [PyTorch](https://pytorch.org/) and [DyNet](https://dynet.readthedocs.io/en/latest/).
+
+## An Overview
+
+Both frameworks record a ‘tape’ of the computation and interpret (or run-time compile) a transformation of the tape played back in reverse. This tape is a different kind of entity than the original program.[[link]](http://www.bcl.hamilton.ie/~barak/papers/toplas-reverse.pdf)
+
+Consider the following code feedforward model.
+
+```python
+x = Variable(randn(20, 1))
+label = Variable(randint(1))
+W_1, W_2 = Variable(randn(20, 20)), Variable(randn(10, 20))
+h = matmul(W_1, x)
+pred = matmul(W_2, h)
+loss = softmax(pred, label)
+loss.backward()
+```
+
+### 1) Dynet uses List to encode the Tape
+
+During the forward execution, a list of operators, in this case `matmul`, `matmul` and `softmax`, is recorded in the tape, along with the necessary information needed to do the backward pass, such as pointers to the inputs and outputs. Then the tape is played in reverse order at `loss.backward()`.
+
+
+
+digraph g {
+ graph [
+ rankdir = "LR"
+ ];
+ node [
+ fontsize = "16"
+ shape = "ellipse"
+ ];
+ edge [];
+ "node0" [
+ label = " type: matmul | input: W_1, x | output: h"
+ shape = "record"
+ ];
+ "node1" [
+ label = " type: matmul | input: W_2, h | output: pred"
+ shape = "record"
+ ];
+ "node2" [
+ label = " type: softmax | input: pred, label | output: loss"
+ shape = "record"
+ ];
+ "node0":f0 -> "node1":f0 [];
+ "node1":f0 -> "node2":f0 [];
+}
+
+
+![Alt text](https://g.gravizo.com/svg?digraph%20g%20{%20graph%20[%20rankdir%20=%20%22LR%22%20];%20node%20[%20fontsize%20=%20%2216%22%20shape%20=%20%22ellipse%22%20];%20edge%20[];%20%22node0%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20%3Cf1%3E%20input:%20W_1,%20x%20|%20%3Cf2%3E%20output:%20h%22%20shape%20=%20%22record%22%20];%20%22node1%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20%3Cf1%3E%20input:%20W_2,%20h%20|%20%3Cf2%3E%20output:%20pred%22%20shape%20=%20%22record%22%20];%20%22node2%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20softmax%20|%20%3Cf1%3E%20input:%20pred,%20label%20|%20%3Cf2%3E%20output:%20loss%22%20shape%20=%20%22record%22%20];%20%22node0%22:f0%20-%3E%20%22node1%22:f0%20[%20id%20=%200%20];%20%22node1%22:f0%20-%3E%20%22node2%22:f0%20[%20id%20=%201%20];%20})
+
+### 2) Pytorch uses Node Graph to encode the Tape
+
+The graph is composed of `Variable`s and `Function`s. During the forward execution, a `Variable` records its creator function, e.g. `h.creator = matmul`. And a Function records its inputs' previous/dependent functions `prev_func` through `creator`, e.g. `matmul.prev_func = matmul1`. At `loss.backward()`, a topological sort is performed on all `prev_func`s. Then the grad ops are performed in the sorted order.
+
+
+
+digraph g {
+ graph [
+ rankdir = "LR"
+ ];
+
+ subgraph function {
+ node [
+ fontsize = "16"
+ style = filled
+ shape = "record"
+ ];
+ "matmul0" [ label = " type: matmul | prev_func: None" ];
+ "matmul1" [ label = " type: matmul | prev_func: matmul" ];
+ "softmax" [ label = " type: softmax | prev_func: matmul" ];
+ }
+
+ subgraph variable {
+ node [
+ fontsize = "16"
+ shape = "Mrecord"
+ style = filled
+ fillcolor = white
+ ];
+ "x" [ label = " x | creator: None" ];
+ "label" [ label = " label | creator: None" ];
+ "W_1" [ label = " W_1 | creator: None" ];
+ "W_2" [ label = " W_2 | creator: None" ];
+ "h" [ label = " h | creator: None" ];
+ "pred" [ label = " pred | creator: matmul" ];
+ "loss" [ label = " loss | creator: softmax" ];
+ }
+
+ subgraph data_flow {
+ "x":f0 -> "matmul0":f0;
+ "W_1":f0 -> "matmul0":f0;
+ "matmul0":f0 -> "h":f0;
+
+ "h":f0 -> "matmul1":f0;
+ "W_2":f0 -> "matmul1":f0;
+ "matmul1":f0 -> "pred":f0;
+
+ "pred":f0 -> "softmax":f0;
+ "label":f0 -> "softmax":f0;
+ "softmax":f0 -> "loss":f0;
+ }
+
+ subgraph prev_func {
+ edge [color="red", arrowsize="0.6", penwidth="1", constraint=false];
+ "matmul1":f1 -> "matmul0":f0;
+ "softmax":f1 -> "matmul1":f0;
+ label = "prev_func";
+ }
+}
+
+
+![Alt text](https://g.gravizo.com/svg?digraph%20g%20{%20graph%20[%20rankdir%20=%20%22LR%22%20];%20subgraph%20function%20{%20node%20[%20fontsize%20=%20%2216%22%20style%20=%20filled%20shape%20=%20%22record%22%20];%20%22matmul0%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20prev_func:%20None%22%20];%20%22matmul1%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20prev_func:%20matmul%22%20];%20%22softmax%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20softmax%20|%20prev_func:%20matmul%22%20];%20}%20subgraph%20variable%20{%20node%20[%20fontsize%20=%20%2216%22%20shape%20=%20%22Mrecord%22%20style%20=%20filled%20fillcolor%20=%20white%20];%20%22x%22%20[%20label%20=%20%22%3Cf0%3E%20x%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22label%22%20[%20label%20=%20%22%3Cf0%3E%20label%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22W_1%22%20[%20label%20=%20%22%3Cf0%3E%20W_1%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22W_2%22%20[%20label%20=%20%22%3Cf0%3E%20W_2%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22h%22%20[%20label%20=%20%22%3Cf0%3E%20h%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22pred%22%20[%20label%20=%20%22%3Cf0%3E%20pred%20|%20%3Cf1%3E%20creator:%20matmul%22%20];%20%22loss%22%20[%20label%20=%20%22%3Cf0%3E%20loss%20|%20%3Cf1%3E%20creator:%20softmax%22%20];%20}%20subgraph%20data_flow%20{%20%22x%22:f0%20-%3E%20%22matmul0%22:f0;%20%22W_1%22:f0%20-%3E%20%22matmul0%22:f0;%20%22matmul0%22:f0%20-%3E%20%22h%22:f0;%20%22h%22:f0%20-%3E%20%22matmul1%22:f0;%20%22W_2%22:f0%20-%3E%20%22matmul1%22:f0;%20%22matmul1%22:f0%20-%3E%20%22pred%22:f0;%20%22pred%22:f0%20-%3E%20%22softmax%22:f0;%20%22label%22:f0%20-%3E%20%22softmax%22:f0;%20%22softmax%22:f0%20-%3E%20%22loss%22:f0;%20}%20subgraph%20prev_func%20{%20edge%20[color=%22red%22,%20arrowsize=%220.6%22,%20penwidth=%221%22,%20constraint=false];%20%22matmul1%22:f1%20-%3E%20%22matmul0%22:f0;%20%22softmax%22:f1%20-%3E%20%22matmul1%22:f0;%20label%20=%20%22prev_func%22;%20}%20})
+
+Chainer and Autograd use similar techniques to record the forward pass. For details please refer to the appendix.
+
+## Design choices
+
+### 1) Dynet's List vs Pytorch's Node Graph
+
+What's good about List:
+1. It avoids a topological sort. One only needs to traverse the list of operators in reverse and call the corresponding backward operator.
+1. It enables efficient data parallelism implementations. One could count the number of usages of a certain variable while constructing the list. Then during the playback, one knows when the calculation of a variable has completed. This enables communication and computation overlapping.
+
+What's good about Node Graph:
+1. More flexibility. PyTorch users can mix and match independent graphs however they like, in whatever threads they like (without explicit synchronization). An added benefit of structuring graphs this way is that when a portion of the graph becomes dead, it is automatically freed. [[2]](https://openreview.net/pdf?id=BJJsrmfCZ) Consider the following example, Pytorch only does backward on SmallNet while Dynet does both BigNet and SmallNet.
+```python
+result = BigNet(data)
+loss = SmallNet(data)
+loss.backward()
+```
+
+### 2) Dynet's Lazy evaluation vs Pytorch's Immediate evaluation
+
+Dynet builds the list in a symbolic manner. Consider the following example
+```python
+for epoch in range(num_epochs):
+ for in_words, out_label in training_data:
+ dy.renew_cg()
+ W = dy.parameter(W_p)
+ b = dy.parameter(b_p)
+ score_sym = dy.softmax(W*dy.concatenate([E[in_words[0]],E[in_words[1]]])+b)
+ loss_sym = dy.pickneglogsoftmax(score_sym, out_label)
+ loss_val = loss_sym.value()
+ loss_sym.backward()
+```
+The computation of `lookup`, `concat`, `matmul` and `softmax` didn't happen until the call of `loss_sym.value()`. This deferred execution is useful because it makes some graph-level optimizations possible, e.g. kernel fusion.
+
+Pytorch chooses immediate evaluation. It avoids ever materializing a "forward graph"/"tape" (no need to explicitly call `dy.renew_cg()` to reset the list), recording only what is necessary to differentiate the computation, i.e. `creator` and `prev_func`.
+
+
+## What can fluid learn from them?
+
+TBD
+
+# Appendix
+
+### Overview
+
+| Framework | Has Tape | Core in C++ | First Release Date |
+|-----------|----------|-------------|--------------------|
+| Autograd | No | No | Mar 5, 2015 |
+| Chainer | No | No | Jun 5, 2015 |
+| Pytorch | No | Yes | Aug 31, 2016 |
+| Dynet | Yes | Yes | Oct 12, 2016 |
+
+### Source Code
+#### Autograd
+[Backward code](https://github.com/HIPS/autograd/blob/442205dfefe407beffb33550846434baa90c4de7/autograd/core.py#L8-L40). In the forward pass, a graph of VJPNode is constructed.
+```python
+# User API
+def make_grad(fun, x):
+ start_node = VJPNode.new_root()
+ end_value, end_node = trace(start_node, fun, x)
+ return backward_pass(g, end_node), end_value
+
+# trace the forward pass by creating VJPNodes
+def trace(start_node, fun, x):
+ with trace_stack.new_trace() as t:
+ start_box = new_box(x, t, start_node)
+ end_box = fun(start_box)
+ return end_box._value, end_box._node
+
+def backward_pass(g, end_node):
+ outgrads = {end_node : (g, False)}
+ for node in toposort(end_node):
+ outgrad = outgrads.pop(node)
+ ingrads = node.vjp(outgrad[0])
+ for parent, ingrad in zip(node.parents, ingrads):
+ outgrads[parent] = add_outgrads(outgrads.get(parent), ingrad)
+ return outgrad[0]
+
+# Every VJPNode corresponds to a op_grad
+class VJPNode(Node):
+ __slots__ = ['parents', 'vjp']
+ def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
+ self.parents = parents
+ vjpmaker = primitive_vjps[fun]
+ self.vjp = vjpmaker(parent_argnums, value, args, kwargs)
+```
+#### Chainer
+Example Code
+```python
+# (1) Function Set definition, creates FunctionNode
+model = FunctionSet(
+ l1=F.Linear(784, 100),
+ l2=F.Linear(100, 100),
+ l3=F.Linear(100, 10)).to_gpu()
+
+# (2) Optimizer Setup
+opt = optimizers.SGD()
+opt.setup(model)
+
+# (3) Forward computation
+def forward(x, t):
+ h1 = F.relu(model.l1(x))
+ h2 = F.relu(model.l2(h1))
+ y = model.l3(h2)
+ return F.softmax_cross_entropy(y, t)
+
+# (4) Training loop
+for epoch in xrange(n_epoch):
+ for i in xrange(0, N, b_size):
+ x = Variable(to_gpu(...))
+ t = Variable(to_gpu(...))
+ opt.zero_grads()
+ loss = forward(x, t)
+ loss.backward()
+ opt.update()
+```
+In `forward(x, t)`, a graph of [`VariableNode`](https://github.com/chainer/chainer/blob/master/chainer/variable.py#L110) and [`FunctionNode`](https://github.com/chainer/chainer/blob/a69103a4aa59d5b318f39b01dbcb858d465b89cf/chainer/function_node.py#L19) is constructed. Every output's `VariableNode.creator` points to the `FunctionNode`.
+```python
+class FunctionNode(object):
+ ...
+ def apply(self, inputs):
+ outputs = self.forward(inputs)
+ ret = tuple([variable.Variable(y, requires_grad=requires_grad)
+ for y in outputs])
+ # Topological ordering
+ self.rank = max([x.rank for x in inputs]) if input_vars else 0
+ # Add backward edges
+ for y in ret:
+ y.creator_node = self
+ self.inputs = tuple([x.node for x in input_vars])
+ self.outputs = tuple([y.node for y in ret])
+
+ return ret
+```
+`loss.backward()` will calculate the accumulated gradient of all variables. All the backward of `FunctionNode`s will be called based on the topological order.
+```python
+class VariableNode(object):
+ ...
+ def backward(self, retain_grad, loss_scale):
+ if self.creator_node is None:
+ return
+
+ cand_funcs = []
+ seen_set = set()
+ grads = {}
+
+ # Initialize error by 1, if this is a loss variable
+ if self.data.size == 1 and self._grad_var is None:
+ self.grad = numpy.ones_like(self.data)
+ grads[self._node] = self._grad_var
+
+ def add_cand(cand):
+ if cand not in seen_set:
+ # Negate since heapq is min-heap. This is a global variable
+ heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))
+ seen_set.add(cand)
+
+ add_cand(self.creator_node)
+
+ while cand_funcs:
+ _, _, func = heapq.heappop(cand_funcs)
+ gxs = func.backward_accumulate(func.inputs, func.outputs, func.outputs.grad)
+
+ for x, gx in enumerate(gxs):
+ if x in grads:
+ grads[x] += gx
+ else:
+ grads[x] = gx
+
+ if x.creator_node is not None:
+ add_cand(x.creator_node)
+```
+
+#### PyTorch
+Example Code
+```python
+x = Variable(torch.ones(5, 5))
+y = Variable(torch.ones(5, 5) * 4)
+z = x ** 2 + x * 2 + x * y + y
+z.backward(torch.ones(5, 5))
+```
+The trace is done by `Variable.creator` and `Function.previous_functions`.
+```python
+class Variable(object):
+ def __init__(self, tensor, creator=None, requires_grad=True):
+ if creator is None:
+ creator = Leaf(self, requires_grad)
+ self.data = tensor
+ self.creator = creator
+ self._grad = None
+
+ def backward(self, gradient=None):
+ if gradient is None:
+ if self.data.numel() != 1:
+ raise RuntimeError('backward should be called only on a scalar (i.e. 1-element tensor) or with gradient w.r.t. the variable')
+ gradient = self.data.new(1).fill_(1)
+ self._execution_engine.run_backward(self, gradient)
+
+class Function(obejct):
+ # ...
+ def _do_forward(self, *input):
+ unpacked_input = tuple(arg.data for arg in input)
+ raw_output = self.forward(*unpacked_input)
+
+ # mark output.creator = self for backward trace
+ output = tuple(Variable(tensor, self) for tensor in raw_output)
+
+ self.previous_functions = [(arg.creator, id(arg)) for arg in input]
+ self.output_ids = {id(var): i for i, var in enumerate(output)}
+ return output
+
+ def _do_backward(self, grad_output):
+        return self.backward(grad_output)
+```
+The [backward](https://github.com/pytorch/pytorch/blob/v0.1.1/torch/autograd/engine.py) is similar to Autograd.
+
+#### DyNet
+Example code
+```python
+model = dy.model()
+W_p = model.add_parameters((20, 100))
+b_p = model.add_parameters(20)
+E = model.add_lookup_parameters((20000, 50))
+for epoch in range(num_epochs):
+ for in_words, out_label in training_data:
+ dy.renew_cg() # init tape
+ W = dy.parameter(W_p)
+ b = dy.parameter(b_p)
+ score_sym = dy.softmax(W*dy.concatenate([E[in_words[0]],E[in_words[1]]])+b)
+ loss_sym = dy.pickneglogsoftmax(score_sym, out_label)
+ loss_val = loss_sym.value()
+ loss_sym.backward()
+```
+[forward](https://github.com/clab/dynet/blob/740a9626a13a2732544de142e256ad0d0a166658/dynet/exec.cc#L84-L158), [backward](https://github.com/clab/dynet/blob/740a9626a13a2732544de142e256ad0d0a166658/dynet/exec.cc#L166-L284). The trace is done by creating a tape of expressions in every iteration. Backward is done by traversing the tape in the reverse order.
+```c++
+void SimpleExecutionEngine::backward(VariableIndex from_where, bool full) {
+ ...
+ for (int i = num_nodes - 1; i >= 0; --i) {
+ // each node corresponds to an op
+ node->backward(xs, node_fx, node_dEdfx, ai, node_dEdxai);
+ }
+ ...
+}
+```
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 4271e4c1bb6bc7b83f2633191ea2d464f4f56c4c..6bc770580640f242cfce6a9838f00210f785010a 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -83,8 +83,13 @@ cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor)
cc_library(feed_fetch_method SRCS feed_fetch_method.cc DEPS lod_tensor scope glog)
-cc_library(executor SRCS executor.cc DEPS op_registry device_context scope
-framework_proto glog lod_rank_table feed_fetch_method)
+if(WITH_DISTRIBUTE)
+ cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr)
+ set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
+ set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
+else()
+ cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method)
+endif()
cc_library(parallel_executor SRCS parallel_executor.cc DEPS ssa_graph_builder_factory threaded_ssa_graph_executor scope_buffered_ssa_graph_executor)
diff --git a/paddle/fluid/framework/details/ssa_graph_checker.h b/paddle/fluid/framework/details/ssa_graph_checker.h
index 542c4a172891ba9d3621918986089b2e400b6ae8..304b221e7e4c414a0ab562a1b99836d3b7c02efb 100644
--- a/paddle/fluid/framework/details/ssa_graph_checker.h
+++ b/paddle/fluid/framework/details/ssa_graph_checker.h
@@ -19,7 +19,7 @@
namespace paddle {
namespace framework {
namespace details {
-class SSAGraph;
+struct SSAGraph;
class SSAGraghBuilderWithChecker : public SSAGraphBuilder {
public:
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index 0c050ca271da8eb79d3271db470eba4decde42c3..e15232a77bb9c3e325b55737ea7abc55e3121708 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -20,6 +20,9 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
+#ifdef PADDLE_WITH_DISTRIBUTE
+#include "paddle/fluid/operators/detail/grpc_client.h"
+#endif
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
@@ -44,6 +47,14 @@ ExecutorPrepareContext::~ExecutorPrepareContext() {
Executor::Executor(const platform::Place& place) : place_(place) {}
+#ifdef PADDLE_WITH_DISTRIBUTE
+void Executor::Complete() {
+ ::paddle::operators::detail::RPCClient::GetInstance<
+ ::paddle::operators::detail::GRPCClient>()
+ ->SendComplete();
+}
+#endif
+
void InitializeVariable(Variable* var, proto::VarType::Type var_type) {
if (var_type == proto::VarType::LOD_TENSOR) {
var->GetMutable();
diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h
index e6f9c3d31c18f762ef2de269977e0642a79fb174..67a0761dac2a9adcdd0ce2b218c4aa505d688d56 100644
--- a/paddle/fluid/framework/executor.h
+++ b/paddle/fluid/framework/executor.h
@@ -44,6 +44,13 @@ class Executor {
explicit Executor(const platform::Place& place);
+#ifdef PADDLE_WITH_DISTRIBUTE
+ /*
+   * Send a signal to the pserver to mark that the current trainer has stopped.
+ */
+ void Complete();
+#endif
+
/* @Brief
* Runtime evaluation of the given ProgramDesc under certain Scope
*
diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc
index 6b8373b1509c898e6ae70a18833df39a4898714a..02ffe3651e1deefcf6981c3d304d64b9a01661bf 100644
--- a/paddle/fluid/operators/detail/grpc_client.cc
+++ b/paddle/fluid/operators/detail/grpc_client.cc
@@ -34,6 +34,12 @@ void GRPCClient::InitEventLoop() {
client_thread_.reset(new std::thread(std::bind(&GRPCClient::Proceed, this)));
}
+void GRPCClient::SendComplete() {
+ for (auto& it : channels_) {
+ this->AsyncSendComplete(it.first);
+ }
+}
+
GRPCClient::~GRPCClient() {
Wait();
cq_.Shutdown();
@@ -210,6 +216,19 @@ void GRPCClient::AsyncSendFetchBarrier(const std::string& ep,
req_count_++;
}
+void GRPCClient::AsyncSendComplete(const std::string& ep, int64_t time_out) {
+ const auto ch = GetChannel(ep);
+
+ BatchBarrierProcessor* s = new BatchBarrierProcessor(ch);
+ s->Prepare(time_out);
+
+ sendrecv::VariableMessage req;
+ req.set_varname(COMPLETE_MESSAGE);
+ auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
+ rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s));
+ req_count_++;
+}
+
void GRPCClient::Wait() {
std::unique_lock lk(sync_mutex_);
sync_cond_.wait(lk, [this] { return req_count_ == 0; });
diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h
index 8db73f875e3e2048386e91f6b5efb29b4ee7e193..44000c028b499d9ad1a0e0dd40a5e287cd61d143 100644
--- a/paddle/fluid/operators/detail/grpc_client.h
+++ b/paddle/fluid/operators/detail/grpc_client.h
@@ -195,6 +195,8 @@ class GRPCClient : public RPCClient {
void Wait() override;
+ void SendComplete() override;
+
protected:
void InitImpl() override;
@@ -204,6 +206,9 @@ class GRPCClient : public RPCClient {
void Proceed();
+ void AsyncSendComplete(const std::string& ep,
+ int64_t time_out = RPCClient::rpc_time_out);
+
std::shared_ptr GetChannel(const std::string& ep);
private:
diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc
index 18651544a1c0207127be335c37fe85c6e24dc16e..2d34f85838c34f1dfe43d2130e127d0258072fa7 100644
--- a/paddle/fluid/operators/detail/grpc_server.cc
+++ b/paddle/fluid/operators/detail/grpc_server.cc
@@ -162,16 +162,18 @@ class RequestPrefetch final : public RequestBase {
void Process() override {
// prefetch process...
- std::string varname = request_->OutVarname();
- VLOG(3) << "RequestPrefetch " << varname;
+ std::string in_var_name = request_->Varname();
+ std::string out_var_name = request_->OutVarname();
+ VLOG(3) << "RequestPrefetch, in_var_name: " << in_var_name
+ << " out_var_name: " << out_var_name;
auto scope = request_->GetMutableLocalScope();
- auto invar = scope->FindVar(varname);
- framework::Variable* outvar = nullptr;
+ auto invar = scope->FindVar(in_var_name);
+ framework::Variable* outvar = scope->FindVar(out_var_name);
- request_handler_->Handle(varname, scope, invar, &outvar);
+ request_handler_->Handle(in_var_name, scope, invar, &outvar, out_var_name);
- SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(),
+ SerializeToByteBuffer(out_var_name, outvar, *request_handler_->dev_ctx(),
&reply_);
Finish(reply_, &responder_);
}
@@ -287,7 +289,7 @@ void AsyncGRPCServer::TryToRegisterNewOne(const std::string& rpc_name,
} else if (rpc_name == kRequestPrefetch) {
b = new RequestPrefetch(&service_, cq.get(), handler, req_id);
} else {
- PADDLE_ENFORCE(false, "not surpported rpc");
+ PADDLE_ENFORCE(false, "not supported rpc");
}
reqs[req_id] = b;
diff --git a/paddle/fluid/operators/detail/request_handler.h b/paddle/fluid/operators/detail/request_handler.h
index fa979024e37f435b918568a1c5e603f8962f9172..a2d08747d59220d30a5b8fd56074fd2739ae3bab 100644
--- a/paddle/fluid/operators/detail/request_handler.h
+++ b/paddle/fluid/operators/detail/request_handler.h
@@ -40,6 +40,7 @@ constexpr char kRequestPrefetch[] = "RequestPrefetch";
#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV"
#define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV"
#define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV"
+#define COMPLETE_MESSAGE "COMPLETE@RECV"
class RPCServer;
@@ -60,9 +61,12 @@ class RequestHandler {
void SetDevCtx(const platform::DeviceContext* dev_ctx) { dev_ctx_ = dev_ctx; }
void SetProgram(framework::ProgramDesc* program) { program_ = program; }
void SetExecutor(framework::Executor* executor) { executor_ = executor; }
+
+ // Used for dist lookup table prefetch
void SetPrefetchPreparedCtx(
- std::unique_ptr prepared) {
- prefetch_ctx_.reset(prepared.release());
+ std::unordered_map<
+ std::string, std::shared_ptr>* g) {
+ prefetch_var_name_to_prepared_ctx_ = g;
}
// Used for async.
@@ -78,9 +82,6 @@ class RequestHandler {
bool sync_mode() { return sync_mode_; }
framework::Scope* scope() { return scope_; }
const platform::DeviceContext* dev_ctx() { return dev_ctx_; }
- framework::ExecutorPrepareContext* prefetch_ctx() {
- return prefetch_ctx_.get();
- }
framework::ProgramDesc* program() { return program_; }
framework::Executor* executor() { return executor_; }
@@ -99,8 +100,8 @@ class RequestHandler {
// *request_handler_->dev_ctx(), &reply_);
// }
virtual bool Handle(const std::string& varname, framework::Scope* scope,
- framework::Variable* var,
- framework::Variable** outvar) = 0;
+ framework::Variable* var, framework::Variable** outvar,
+ const std::string& out_var_name = "") = 0;
protected:
const bool sync_mode_;
@@ -109,12 +110,17 @@ class RequestHandler {
framework::Executor* executor_;
framework::Scope* scope_;
framework::ProgramDesc* program_;
- std::unique_ptr prefetch_ctx_;
+
+ // used for distribute lookup table prefetch
+ std::unordered_map>*
+ prefetch_var_name_to_prepared_ctx_;
// Used for async.
std::unordered_map>*
grad_to_prepared_ctx_;
+
RPCServer* rpc_server_;
};
diff --git a/paddle/fluid/operators/detail/request_handler_impl.cc b/paddle/fluid/operators/detail/request_handler_impl.cc
index 5f1a346e93b1a0239af77b86d10782d67c403e23..7425bee798cd9ba0af8cd777a6db63862c8a4031 100644
--- a/paddle/fluid/operators/detail/request_handler_impl.cc
+++ b/paddle/fluid/operators/detail/request_handler_impl.cc
@@ -30,7 +30,8 @@ namespace detail {
bool RequestSendHandler::Handle(const std::string& varname,
framework::Scope* scope,
framework::Variable* invar,
- framework::Variable** outvar) {
+ framework::Variable** outvar,
+ const std::string& out_var_name) {
VLOG(4) << "RequestSendHandler:" << varname;
// Async
@@ -49,6 +50,9 @@ bool RequestSendHandler::Handle(const std::string& varname,
if (varname == BATCH_BARRIER_MESSAGE) {
VLOG(3) << "sync: recv batch barrier message";
rpc_server_->IncreaseBatchBarrier(kRequestSend);
+ } else if (varname == COMPLETE_MESSAGE) {
+ VLOG(3) << "sync: recv complete message";
+ rpc_server_->DecreaseClientNum();
} else {
VLOG(3) << "sync: received var_name: " << varname;
if (sync_mode_) {
@@ -79,7 +83,8 @@ void RequestSendHandler::ResetSparseVarRecorder() {
bool RequestGetHandler::Handle(const std::string& varname,
framework::Scope* scope,
framework::Variable* invar,
- framework::Variable** outvar) {
+ framework::Variable** outvar,
+ const std::string& out_var_name) {
VLOG(4) << "RequestGetHandler:" << varname;
if (varname != FETCH_BARRIER_MESSAGE) {
@@ -102,13 +107,14 @@ bool RequestGetHandler::Handle(const std::string& varname,
bool RequestPrefetchHandler::Handle(const std::string& varname,
framework::Scope* scope,
framework::Variable* invar,
- framework::Variable** outvar) {
+ framework::Variable** outvar,
+ const std::string& out_var_name) {
VLOG(4) << "RequestPrefetchHandler " << varname;
- auto var_desc = program_->Block(0).FindVar(varname);
- *outvar = scope->FindVar(varname);
+ auto var_desc = program_->Block(0).FindVar(out_var_name);
InitializeVariable(*outvar, var_desc->GetType());
- executor_->RunPreparedContext(prefetch_ctx_.get(), scope);
+ executor_->RunPreparedContext(
+ (*prefetch_var_name_to_prepared_ctx_)[varname].get(), scope);
return true;
}
diff --git a/paddle/fluid/operators/detail/request_handler_impl.h b/paddle/fluid/operators/detail/request_handler_impl.h
index c392267cfaeb7e94e6a23d6445c09b756e7e58b1..3f77c09a9598b431d747f1b824615e49d939098e 100644
--- a/paddle/fluid/operators/detail/request_handler_impl.h
+++ b/paddle/fluid/operators/detail/request_handler_impl.h
@@ -39,7 +39,8 @@ class RequestSendHandler final : public RequestHandler {
explicit RequestSendHandler(bool sync_mode) : RequestHandler(sync_mode) {}
virtual ~RequestSendHandler() {}
bool Handle(const std::string& varname, framework::Scope* scope,
- framework::Variable* var, framework::Variable** outvar) override;
+ framework::Variable* var, framework::Variable** outvar,
+ const std::string& out_var_name = "") override;
void ResetSparseVarRecorder();
private:
@@ -52,7 +53,8 @@ class RequestGetHandler final : public RequestHandler {
explicit RequestGetHandler(bool sync_mode) : RequestHandler(sync_mode) {}
virtual ~RequestGetHandler() {}
bool Handle(const std::string& varname, framework::Scope* scope,
- framework::Variable* var, framework::Variable** outvar) override;
+ framework::Variable* var, framework::Variable** outvar,
+ const std::string& out_var_name = "") override;
};
class RequestPrefetchHandler final : public RequestHandler {
@@ -60,7 +62,8 @@ class RequestPrefetchHandler final : public RequestHandler {
explicit RequestPrefetchHandler(bool sync_mode) : RequestHandler(sync_mode) {}
virtual ~RequestPrefetchHandler() {}
bool Handle(const std::string& varname, framework::Scope* scope,
- framework::Variable* var, framework::Variable** outvar) override;
+ framework::Variable* var, framework::Variable** outvar,
+ const std::string& out_var_name = "") override;
};
} // namespace detail
diff --git a/paddle/fluid/operators/detail/rpc_client.h b/paddle/fluid/operators/detail/rpc_client.h
index 7e76ac0348574d4090793b191be0ff3ff8666b37..47c6ffb4fd7a002fc0bd8053fb3314a2fbf18fd3 100644
--- a/paddle/fluid/operators/detail/rpc_client.h
+++ b/paddle/fluid/operators/detail/rpc_client.h
@@ -53,6 +53,11 @@ class RPCClient {
virtual void AsyncSendFetchBarrier(const std::string& ep,
int64_t time_out = rpc_time_out) = 0;
+  // SendComplete tells all the servers that the current trainer has no more
+  // data to train, so that the pserver can reduce its barrier count and
+  // continue training with the other trainers.
+ virtual void SendComplete() = 0;
+
virtual void Wait() = 0;
static constexpr int64_t rpc_time_out = 120 * 1000;
diff --git a/paddle/fluid/operators/detail/rpc_server.cc b/paddle/fluid/operators/detail/rpc_server.cc
index 448763372a8c224cc68319a4a444915896b68234..cd0fe96e2301ee3304fe9a2967df58b9f7072d8d 100644
--- a/paddle/fluid/operators/detail/rpc_server.cc
+++ b/paddle/fluid/operators/detail/rpc_server.cc
@@ -43,7 +43,7 @@ void RPCServer::SavePort() const {
void RPCServer::WaitBarrier(const std::string& rpc_name) {
std::unique_lock lock(this->mutex_);
- barrier_cond_.wait(lock, [=] {
+ barrier_cond_.wait(lock, [this, &rpc_name] {
return (barrier_counter_[rpc_name] >= client_num_ || exit_flag_.load());
});
@@ -53,19 +53,23 @@ void RPCServer::WaitBarrier(const std::string& rpc_name) {
void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) {
VLOG(3) << "RPCServer begin IncreaseBatchBarrier " << rpc_name;
int b = 0;
- {
- std::unique_lock lock(mutex_);
- b = ++barrier_counter_[rpc_name];
- }
-
- VLOG(3) << "RPCServer IncreaseBatchBarrier " << rpc_name
- << ", barrier_count:" << b << ", fan_in" << client_num_;
-
+ std::unique_lock lock(mutex_);
+ b = ++barrier_counter_[rpc_name];
if (b >= client_num_) {
+ lock.unlock();
barrier_cond_.notify_all();
+ lock.lock();
}
}
+void RPCServer::DecreaseClientNum() {
+ {
+ std::unique_lock lock(mutex_);
+ client_num_--;
+ }
+ barrier_cond_.notify_all();
+}
+
void RPCServer::ResetBarrierCounter() {
VLOG(3) << "RPCServer ResetBarrierCounter ";
std::unique_lock lock(mutex_);
diff --git a/paddle/fluid/operators/detail/rpc_server.h b/paddle/fluid/operators/detail/rpc_server.h
index f809c13c726ac2f1c60e8cf84848c4138f631b44..2e3342428cb56c34abaca655d5906668cda8f140 100644
--- a/paddle/fluid/operators/detail/rpc_server.h
+++ b/paddle/fluid/operators/detail/rpc_server.h
@@ -60,7 +60,7 @@ class RPCServer {
void SetCond(const std::string& rpc_name);
void WaitCond(const std::string& rpc_name);
void IncreaseBatchBarrier(const std::string rpc_name);
-
+ void DecreaseClientNum();
void ResetBarrierCounter();
protected:
@@ -79,8 +79,7 @@ class RPCServer {
std::string bind_address_;
std::atomic exit_flag_;
int selected_port_;
-
- const int client_num_;
+ int client_num_;
std::unordered_map rpc_call_map_;
std::unordered_map rpc_thread_num_;
diff --git a/paddle/fluid/operators/detail/rpc_server_test.cc b/paddle/fluid/operators/detail/rpc_server_test.cc
index f49274a7b53d04f82718887aabaffd5d33053dfe..463a7b80cfac280de5afe91ee85caaaf074cef32 100644
--- a/paddle/fluid/operators/detail/rpc_server_test.cc
+++ b/paddle/fluid/operators/detail/rpc_server_test.cc
@@ -98,11 +98,17 @@ void StartServer() {
framework::Executor exe(place);
platform::CPUDeviceContext ctx(place);
auto* block = AppendPrefetchBlcok(&program);
- auto prepared = exe.Prepare(program, block->ID());
+ std::string in_var_name("ids");
+ std::vector prefetch_block_ids{block->ID()};
+ auto prepared = exe.Prepare(program, prefetch_block_ids);
InitTensorsOnServer(&scope, &place, 10);
+ std::unordered_map>
+ prefetch_var_name_to_prepared;
+ prefetch_var_name_to_prepared[in_var_name] = prepared[0];
g_req_handler->SetProgram(&program);
- g_req_handler->SetPrefetchPreparedCtx(std::move(prepared));
+ g_req_handler->SetPrefetchPreparedCtx(&prefetch_var_name_to_prepared);
g_req_handler->SetDevCtx(&ctx);
g_req_handler->SetScope(&scope);
g_req_handler->SetExecutor(&exe);
diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h
index 0803a6035d342fefdae69297461fc78abbf18414..12364fff96c03c5f9dff23c7c00ceedd043803a6 100644
--- a/paddle/fluid/operators/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise_op.h
@@ -66,40 +66,41 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
.SetDefault(-1)
.EqualGreaterThan(-1);
AddComment(string::Sprintf(R"DOC(
-Limited Elementwise %s Operator.
+Limited Elementwise %s Operator
The equation is:
$$%s$$
-$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be
-smaller than or equal to the dimensions of $X$.
+- $X$: a tensor of any dimension.
+- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.
There are two cases for this operator:
-1. The shape of $Y$ is same with $X$;
-2. The shape of $Y$ is a congiguous subsequencet of $X$. The trailing dimensions
- of size 1 for $Y$ will be ignored for the consideration of subsequence.
+1. The shape of $Y$ is the same with $X$.
+2. The shape of $Y$ is a continuous subsequence of $X$.
For case 2:
-$Y$ will be broadcasted to match the shape of $X$ and axis should be
-set to index of the start dimension to broadcast $Y$ onto $X$.
+1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
+ for broadcasting $Y$ onto $X$.
+2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
+3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of
+ subsequence, such as shape(Y) = (2, 1) => (2).
-If axis is -1, it is treated as axis=rank(X)-rank(Y).
+For example:
-For example
.. code-block:: python
shape(X) = (2, 3, 4, 5), shape(Y) = (,)
shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
- shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
+ shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
-Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details)
-information. However, the output only shares the LoD information with input $X$.
+The inputs $X$ and $Y$ can carry different LoD (Level of Details) information.
+But the output only shares the LoD information with the input $X$.
)DOC",
GetName(), GetEquation()));
diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc
index 76106004404bb0bb108cca003869f07c43bbf62f..4d12278799f66f2fb92b7580ba0c43e845aa4d3a 100644
--- a/paddle/fluid/operators/listen_and_serv_op.cc
+++ b/paddle/fluid/operators/listen_and_serv_op.cc
@@ -96,19 +96,22 @@ static int64_t GetTimestamp() {
return tp.tv_sec * 1000 + tp.tv_usec / 1000;
}
-void ListenAndServOp::RunSyncLoop(framework::Executor *executor,
- framework::ProgramDesc *program,
- framework::Scope *recv_scope,
- framework::BlockDesc *prefetch_block) const {
+void ListenAndServOp::RunSyncLoop(
+ framework::Executor *executor, framework::ProgramDesc *program,
+ framework::Scope *recv_scope,
+ const std::vector &prefetch_block_id_list) const {
size_t num_blocks = program->Size();
PADDLE_ENFORCE_GE(num_blocks, 2,
"server program should have at least 2 blocks");
- std::vector block_list;
- for (size_t blkid = 1; blkid < num_blocks; ++blkid) {
- block_list.push_back(blkid);
+ std::vector optimize_block_id_list;
+ for (int blkid = 1; blkid < num_blocks; ++blkid) {
+ if (std::find(prefetch_block_id_list.begin(), prefetch_block_id_list.end(),
+ blkid) == prefetch_block_id_list.end()) {
+ optimize_block_id_list.push_back(blkid);
+ }
}
- auto optimize_prepared = executor->Prepare(*program, block_list);
+ auto optimize_prepared = executor->Prepare(*program, optimize_block_id_list);
// Insert placeholder for block0 which holds current op itself.
optimize_prepared.insert(
optimize_prepared.begin(),
@@ -135,16 +138,17 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor,
std::vector parallel_blkids;
parallel_blkids.push_back(1);
double ts = GetTimestamp();
- for (size_t blkid = 2; blkid < num_blocks; ++blkid) {
- if (blkid != static_cast(prefetch_block->ID())) {
- if (program->Block(blkid).Parent() != last_parent_blkid) {
- ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared,
- program, recv_scope);
- parallel_blkids.clear();
- last_parent_blkid = program->Block(blkid).Parent();
- }
- parallel_blkids.push_back(blkid);
+ for (size_t i = 1; i < optimize_block_id_list.size(); ++i) {
+ // skip the first optimize block because it is already in the
+ // parallel_blkids.
+ int blkid = optimize_block_id_list[i];
+ if (program->Block(blkid).Parent() != last_parent_blkid) {
+ ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared,
+ program, recv_scope);
+ parallel_blkids.clear();
+ last_parent_blkid = program->Block(blkid).Parent();
}
+ parallel_blkids.push_back(blkid);
}
ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared, program,
recv_scope);
@@ -210,18 +214,19 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor,
} // while(true)
}
-static void FillRequestCtx(detail::RequestHandler *h, framework::Scope *scope,
- platform::DeviceContext *dev_ctx,
- framework::Executor *executor,
- framework::ProgramDesc *program,
- framework::ExecutorPrepareContext *prefetch_ctx,
- detail::RPCServer *rpc_server) {
+static void FillRequestCtx(
+ detail::RequestHandler *h, framework::Scope *scope,
+ platform::DeviceContext *dev_ctx, framework::Executor *executor,
+ framework::ProgramDesc *program,
+ std::unordered_map>
+ *prefetch_ctx,
+ detail::RPCServer *rpc_server) {
h->SetScope(scope);
h->SetDevCtx(dev_ctx);
h->SetExecutor(executor);
h->SetProgram(program);
- h->SetPrefetchPreparedCtx(
- std::unique_ptr(prefetch_ctx));
+ h->SetPrefetchPreparedCtx(prefetch_ctx);
h->SetRPCServer(rpc_server);
}
@@ -255,17 +260,42 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope,
request_prefetch_handler_.get());
auto *optimize_block = Attr(kOptimizeBlock);
- auto *prefetch_block = Attr(kPrefetchBlock);
auto *program = optimize_block->Program();
framework::Executor executor(dev_place);
// prepare for prefetch
- VLOG(3) << "prefetch block id is " << prefetch_block->ID();
- auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID());
+ std::vector prefetch_block_id_list;
+ std::unordered_map block_id_to_prefetch_var_name;
+
+ auto prefetch_var_name_to_block_id_str =
+ Attr>(kPrefetchVarNameToBlockId);
+ for (const auto &prefetch_var_name_and_id :
+ prefetch_var_name_to_block_id_str) {
+ std::vector pieces;
+ split(prefetch_var_name_and_id, ':', &pieces);
+ VLOG(3) << "after split, prefetch_var = " << pieces[0]
+ << ", id=" << pieces[1];
+ PADDLE_ENFORCE_EQ(pieces.size(), 2);
+
+ int block_id = std::stoi(pieces[1]);
+ prefetch_block_id_list.push_back(block_id);
+ block_id_to_prefetch_var_name[block_id] = pieces[0];
+ }
+
+ auto prefetch_prepared = executor.Prepare(*program, prefetch_block_id_list);
+
+ std::unordered_map>
+ prefetch_var_name_to_prepared_ctx;
+ for (size_t i = 0; i < prefetch_block_id_list.size(); ++i) {
+ auto block_id = prefetch_block_id_list[i];
+ auto prefetch_var_name = block_id_to_prefetch_var_name[block_id];
+ prefetch_var_name_to_prepared_ctx[prefetch_var_name] = prefetch_prepared[i];
+ }
auto f = std::bind(FillRequestCtx, std::placeholders::_1, &recv_scope,
- &dev_ctx, &executor, program, prefetch_prepared.release(),
- rpc_service_.get());
+ &dev_ctx, &executor, program,
+ &prefetch_var_name_to_prepared_ctx, rpc_service_.get());
f(request_send_handler_.get());
f(request_get_handler_.get());
@@ -283,7 +313,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope,
// Write to a file of server selected port for python use.
SavePort();
if (sync_mode) {
- RunSyncLoop(&executor, program, &recv_scope, prefetch_block);
+ RunSyncLoop(&executor, program, &recv_scope, prefetch_block_id_list);
} else {
RunAsyncLoop(&executor, program);
}
@@ -309,8 +339,9 @@ class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr("sync_mode", "if works at sync_mode or not").SetDefault(true);
AddAttr(kOptimizeBlock,
"BlockID to run on server side.");
- AddAttr(kPrefetchBlock,
- "prefetch block to run on server side.");
+ AddAttr>(kPrefetchVarNameToBlockId,
+ "prefetch blocks to run on server side.")
+ .SetDefault({});
AddAttr("Fanin", "How many clients send to this server.")
.SetDefault(1);
}
diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h
index 87952cb0e683596b2b0395890b6e25b15f74d7e2..46c3a19e20b3f2dd970a672bb99f98e83d3e25bf 100644
--- a/paddle/fluid/operators/listen_and_serv_op.h
+++ b/paddle/fluid/operators/listen_and_serv_op.h
@@ -18,6 +18,7 @@ limitations under the License. */
#include
#include
#include
+#include
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
@@ -30,7 +31,7 @@ namespace paddle {
namespace operators {
constexpr char kOptimizeBlock[] = "OptimizeBlock";
-constexpr char kPrefetchBlock[] = "PrefetchBlock";
+constexpr char kPrefetchVarNameToBlockId[] = "prefetch_var_name_to_block_id";
void RunServer(std::shared_ptr service);
@@ -46,7 +47,7 @@ class ListenAndServOp : public framework::OperatorBase {
void RunSyncLoop(framework::Executor* executor,
framework::ProgramDesc* program,
framework::Scope* recv_scope,
- framework::BlockDesc* prefetch_block) const;
+ const std::vector& prefetch_block_id_list) const;
void RunAsyncLoop(framework::Executor* executor,
framework::ProgramDesc* program) const;
diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc
index cdbc975c02214721ceae3a338741101ef32d7ee9..aa19c62c83648814e86b1e7062424be3693e4b98 100644
--- a/paddle/fluid/operators/norm_op.cc
+++ b/paddle/fluid/operators/norm_op.cc
@@ -16,40 +16,34 @@ limitations under the License. */
namespace paddle {
namespace operators {
-template
class NormOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
- AddInput(
- "X",
- "(Tensor) The input tensor of norm operator. "
- "The format of input tensor is NCHW. Where N is batch size, C is the "
- "number of channels, H and W is the height and width of feature.");
- AddInput("Scale",
- "(Tensor) The input tensor of norm operator. "
- "The format of input tensor is C * 1.");
- AddAttr("epsilon",
- "(float, default 1e-10) Constant "
- "for numerical stability.")
+ AddInput("X", "(Tensor) A tensor of rank >= axis.");
+ AddAttr("axis",
+ "The axis on which to apply normalization. If axis < 0, "
+ "the dimension to normalize is rank(X) + axis. -1 is "
+ "the last dimension.");
+ AddAttr("epsilon",
+ "(float, default 1e-10) The epsilon value is used "
+ "to avoid division by zero.")
.SetDefault(1.0e-10f);
- AddOutput("Out",
- "(Tensor) The output tensor of norm operator."
- "N * M."
- "M = C * H * W");
+ AddOutput("Norm",
+ "(Tensor) A tensor that saves `sqrt(sum(x^2) + epsilon)`, "
+ "which will be used in the backward kernel.")
+ .AsIntermediate();
+ AddOutput("Out", "(Tensor) A tensor of the same shape as X.");
AddComment(R"DOC(
- "Input shape: $(N, C, H, W)$
- Scale shape: $(C, 1)$
- Output shape: $(N, C, H, W)$
- Where
- forward
- $$
- [\frac {x_{1}}{\sqrt{\sum{x_{i}^{2}}}} \frac {x_{2}}{\sqrt{\sum{x_{i}^{2}}}} \frac {x_{3}}{\sqrt{\sum{x_{i}^{2}}}} \cdot \cdot \cdot \frac {x_{n}}{\sqrt{\sum{x_{i}^{2}}}}]
- $$
- backward
- $$
- \frac{\frac{\mathrm{d}L }{\mathrm{d}y_{1}} - \frac {x_{1}\sum {\frac{\mathrm{d} L}{\mathrm{d} y_{j}}}x_{j}}{\sum x_{j}^{2}} }{\sqrt{\sum{x_{j}^{2}}}}
- $$
- )DOC");
+
+Given a tensor, apply 2-normalization along the provided axis.
+
+$$
+y = \frac{x}{ \sqrt{\sum {x^2} + epsilon }}
+$$
+
+where, $\sum {x^2}$ is calculated along the `axis` dimension.
+
+)DOC");
}
};
@@ -58,15 +52,15 @@ class NormOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
- "Input(X) of NormOp"
- "should not be null.");
- PADDLE_ENFORCE(ctx->HasInput("Scale"),
- "Input(Scale) of NormOp"
- "should not be null.");
+ "Input(X) of NormOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of NormOp should not be null.");
- auto in_x_dims = ctx->GetInputDim("X");
- ctx->SetOutputDim("Out", in_x_dims);
+ auto xdim = ctx->GetInputDim("X");
+ ctx->SetOutputDim("Out", xdim);
+ int axis = ctx->Attrs().Get("axis");
+ if (axis < 0) axis = xdim.size() + axis;
+ xdim[axis] = 1;
+ ctx->SetOutputDim("Norm", xdim);
}
};
@@ -84,12 +78,12 @@ class NormOpGrad : public framework::OperatorWithKernel {
} // namespace paddle
namespace ops = paddle::operators;
-REGISTER_OPERATOR(norm, ops::NormOp, ops::NormOpMaker,
+using CPU = paddle::platform::CPUDeviceContext;
+
+REGISTER_OPERATOR(norm, ops::NormOp, ops::NormOpMaker,
paddle::framework::DefaultGradOpDescMaker);
REGISTER_OPERATOR(norm_grad, ops::NormOpGrad);
-REGISTER_OP_CPU_KERNEL(
- norm, ops::NormKernel,
- ops::NormKernel);
-REGISTER_OP_CPU_KERNEL(
- norm_grad, ops::NormGradKernel,
- ops::NormGradKernel);
+REGISTER_OP_CPU_KERNEL(norm, ops::NormKernel,
+ ops::NormKernel);
+REGISTER_OP_CPU_KERNEL(norm_grad, ops::NormGradKernel,
+ ops::NormGradKernel);
diff --git a/paddle/fluid/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu
index d1d9be50742b54a3b6f068fd43ec4b16696183bf..1d0021d33ff9ee65c3366183466b94266e6c2999 100644
--- a/paddle/fluid/operators/norm_op.cu
+++ b/paddle/fluid/operators/norm_op.cu
@@ -16,9 +16,9 @@ limitations under the License. */
#include "paddle/fluid/operators/norm_op.h"
namespace ops = paddle::operators;
-REGISTER_OP_CUDA_KERNEL(
- norm, ops::NormKernel,
- ops::NormKernel);
-REGISTER_OP_CUDA_KERNEL(
- norm_grad, ops::NormGradKernel,
- ops::NormGradKernel);
+using CUDA = paddle::platform::CUDADeviceContext;
+
+REGISTER_OP_CUDA_KERNEL(norm, ops::NormKernel,
+ ops::NormKernel);
+REGISTER_OP_CUDA_KERNEL(norm_grad, ops::NormGradKernel,
+ ops::NormGradKernel);
diff --git a/paddle/fluid/operators/norm_op.h b/paddle/fluid/operators/norm_op.h
index 0ad29e8a0385c46a07842930378ed7a040564437..3167bdc8ac718b23435690577e4163826d14a332 100644
--- a/paddle/fluid/operators/norm_op.h
+++ b/paddle/fluid/operators/norm_op.h
@@ -19,156 +19,110 @@ limitations under the License. */
namespace paddle {
namespace operators {
-template
+inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n,
+ int* post) {
+ *pre = 1;
+ *post = 1;
+ *n = dim[axis];
+ for (int i = 0; i < axis; ++i) {
+ (*pre) *= dim[i];
+ }
+ for (int i = axis + 1; i < dim.size(); ++i) {
+ (*post) *= dim[i];
+ }
+}
+
+template
class NormKernel : public framework::OpKernel {
public:
- void Compute(const framework::ExecutionContext& context) const override {
- const framework::Tensor* in_x = context.Input("X");
- const framework::Tensor* scale = context.Input("Scale");
- auto* out = context.Output("Out");
- auto epsilon = static_cast(context.Attr("epsilon"));
- out->mutable_data(context.GetPlace());
- int batch_size = in_x->dims()[0];
- int channels = in_x->dims()[1];
- int height = in_x->dims()[2];
- int width = in_x->dims()[3];
- int fea_len = height * width;
- auto* place =
- context.template device_context().eigen_device();
- auto x =
- framework::EigenMatrix::From(
- *in_x, framework::make_ddim({batch_size, fea_len * channels}));
- // get square
- framework::Tensor x_square;
- x_square.mutable_data(in_x->dims(), context.GetPlace());
- auto x_square_eigen =
- framework::EigenMatrix::From(
- x_square, framework::make_ddim({batch_size, fea_len * channels}));
- x_square_eigen.device(*place) = x.square();
- auto scale_eigen =
- framework::EigenVector::Flatten(
- *scale);
- for (int n = 0; n < batch_size; ++n) {
- framework::Tensor in_x_batch = in_x->Slice(n, n + 1);
- auto in_x_batch_eigen =
- framework::EigenMatrix::From(
- in_x_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor x_square_batch = x_square.Slice(n, n + 1);
- auto x_square_batch_eigen =
- framework::EigenMatrix::From(
- x_square_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor out_batch = out->Slice(n, n + 1);
- auto out_batch_eigen =
- framework::EigenMatrix::From(
- out_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor tmp_tensor;
- tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}),
- context.GetPlace());
- auto tmp = framework::EigenVector::Flatten(tmp_tensor);
- // get colsum and sqrt , inverse
- auto dim = Eigen::array({{0}});
- tmp.device(*place) = x_square_batch_eigen.sum(dim);
- tmp.device(*place) = (tmp + epsilon).sqrt().inverse();
- Eigen::array broadcast_dim_col;
- broadcast_dim_col[1] = 1;
- broadcast_dim_col[0] = channels;
- out_batch_eigen.device(*place) =
- in_x_batch_eigen * (tmp.broadcast(broadcast_dim_col));
- Eigen::array broadcast_dim_row;
- broadcast_dim_row[1] = fea_len;
- broadcast_dim_row[0] = 1;
- out_batch_eigen.device(*place) =
- out_batch_eigen * (scale_eigen.broadcast(broadcast_dim_row));
- }
+ void Compute(const framework::ExecutionContext& ctx) const override {
+ auto* in_x = ctx.Input("X");
+ auto* out_y = ctx.Output("Out");
+ auto* out_norm = ctx.Output("Norm");
+ out_y->mutable_data(ctx.GetPlace());
+ out_norm->mutable_data(ctx.GetPlace());
+
+ auto xdim = in_x->dims();
+ auto ndim = out_norm->dims();
+ T eps = static_cast(ctx.Attr("epsilon"));
+ int axis = ctx.Attr("axis");
+ if (axis < 0) axis = xdim.size() + axis;
+ int pre, n, post;
+ GetDims(xdim, axis, &pre, &n, &post);
+
+ auto* place = ctx.template device_context().eigen_device();
+
+ Eigen::DSizes shape(pre, n, post);
+ Eigen::DSizes norm_shape(pre, post);
+
+ auto x_e = framework::EigenVector::Flatten(*in_x);
+ auto y_e = framework::EigenVector::Flatten(*out_y);
+ auto norm_e = framework::EigenVector::Flatten(*out_norm);
+ auto x = x_e.reshape(shape);
+ auto y = y_e.reshape(shape);
+ auto norm = norm_e.reshape(norm_shape);
+
+ Eigen::DSizes rdim(1);
+ // y = x / sqrt((sum(x * x) + epsilon))
+ // norm = sqrt(sum(x * x) + epsilon)
+ auto sum = x.pow(2).sum(rdim) + eps;
+ norm.device(*place) = sum.sqrt();
+ // y = x / norm
+ Eigen::DSizes rshape(pre, 1, post);
+ Eigen::DSizes bcast(1, n, 1);
+ y.device(*place) = x / norm.reshape(rshape).broadcast(bcast);
}
};
template
class NormGradKernel : public framework::OpKernel {
public:
- void Compute(const framework::ExecutionContext& context) const override {
- const framework::Tensor* in_x = context.Input("X");
- const framework::Tensor* scale = context.Input("Scale");
- const framework::Tensor* out_grad =
- context.Input(framework::GradVarName("Out"));
- auto epsilon = static_cast(context.Attr("epsilon"));
- framework::Tensor* in_x_grad =
- context.Output(framework::GradVarName("X"));
- in_x_grad->mutable_data(context.GetPlace());
- int batch_size = in_x->dims()[0];
- int channels = in_x->dims()[1];
- int height = in_x->dims()[2];
- int width = in_x->dims()[3];
- int fea_len = height * width;
- auto* place =
- context.template device_context().eigen_device();
-
- auto scale_eigen =
- framework::EigenVector::Flatten(
- *scale);
- auto x =
- framework::EigenMatrix::From(
- *in_x, framework::make_ddim({batch_size, fea_len * channels}));
- // get square
- framework::Tensor x_square;
- x_square.mutable_data(in_x->dims(), context.GetPlace());
- auto x_square_eigen =
- framework::EigenMatrix::From(
- x_square, framework::make_ddim({batch_size, fea_len * channels}));
- x_square_eigen.device(*place) = x.square();
-
- for (int n = 0; n < batch_size; ++n) {
- framework::Tensor in_x_batch = in_x->Slice(n, n + 1);
- auto in_x_batch_eigen =
- framework::EigenMatrix::From(
- in_x_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor in_g_batch = in_x_grad->Slice(n, n + 1);
- auto in_g_batch_eigen =
- framework::EigenMatrix::From(
- in_g_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor x_square_batch = x_square.Slice(n, n + 1);
- auto x_square_batch_eigen =
- framework::EigenMatrix::From(
- x_square_batch, framework::make_ddim({channels, fea_len}));
- framework::Tensor outg_batch = out_grad->Slice(n, n + 1);
- auto outg_batch_eigen =
- framework::EigenMatrix::From(
- outg_batch, framework::make_ddim({channels, fea_len}));
-
- framework::Tensor tmp_tensor;
- tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}),
- context.GetPlace());
- auto tmp_eigen =
- framework::EigenVector::Flatten(tmp_tensor);
- auto dim = Eigen::array({{0}});
- tmp_eigen.device(*place) = (in_x_batch_eigen * outg_batch_eigen).sum(dim);
- framework::Tensor norm_tmp_tensor;
- norm_tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}),
- context.GetPlace());
- auto norm_tmp_eigen =
- framework::EigenVector::Flatten(norm_tmp_tensor);
- norm_tmp_eigen.device(*place) =
- (x_square_batch_eigen.sum(dim) + epsilon).sqrt();
- Eigen::array broadcast_dim_col;
- broadcast_dim_col[1] = 1;
- broadcast_dim_col[0] = channels;
- in_g_batch_eigen.device(*place) =
- in_x_batch_eigen * tmp_eigen.broadcast(broadcast_dim_col);
- in_g_batch_eigen.device(*place) =
- in_g_batch_eigen /
- (norm_tmp_eigen * norm_tmp_eigen).broadcast(broadcast_dim_col);
- in_g_batch_eigen.device(*place) = outg_batch_eigen - in_g_batch_eigen;
- // outg_batch_eigen + (in_g_batch_eigen * -1);
- in_g_batch_eigen.device(*place) =
- in_g_batch_eigen / norm_tmp_eigen.broadcast(broadcast_dim_col);
- Eigen::array broadcast_dim_row;
- broadcast_dim_row[1] = fea_len;
- broadcast_dim_row[0] = 1;
- in_g_batch_eigen.device(*place) =
- in_g_batch_eigen * (scale_eigen.broadcast(broadcast_dim_row));
- }
+ void Compute(const framework::ExecutionContext& ctx) const override {
+ auto* in_x = ctx.Input("X");
+ auto* in_norm = ctx.Input("Norm");
+ auto* in_dy = ctx.Input(framework::GradVarName("Out"));
+ auto* out_dx = ctx.Output(framework::GradVarName("X"));
+ out_dx->mutable_data(ctx.GetPlace());
+
+ auto xdim = in_x->dims();
+ int axis = ctx.Attr("axis");
+ if (axis < 0) axis = xdim.size() + axis;
+ int pre, n, post;
+ GetDims(xdim, axis, &pre, &n, &post);
+
+ auto* place = ctx.template device_context().eigen_device();
+
+ auto x_e = framework::EigenVector::Flatten(*in_x);
+ auto dy_e = framework::EigenVector::Flatten(*in_dy);
+ auto norm_e = framework::EigenVector::Flatten(*in_norm);
+ auto dx_e = framework::EigenVector::Flatten(*out_dx);
+
+ Eigen::DSizes shape(pre, n, post);
+ Eigen::DSizes norm_shape(pre, post);
+ auto x = x_e.reshape(shape);
+ auto dy = dy_e.reshape(shape);
+ auto norm = norm_e.reshape(norm_shape);
+ auto dx = dx_e.reshape(shape);
+
+ framework::Tensor rsum;
+ rsum.mutable_data({pre, post}, ctx.GetPlace());
+ auto sum = framework::EigenTensor::From(rsum);
+
+ Eigen::DSizes rdim(1);
+ Eigen::DSizes bcast(1, n, 1);
+ Eigen::DSizes rshape(pre, 1, post);
+
+ // dx = ( dy/sqrt(sum(x*x)) ) * [1 - x*sum(x) / (sum(x*x) + e)]
+ // = [dy - dy * x * sum(x) / (sum(x*x) + e)] / sqrt(sum(x*x))
+ // = [dy - x * sum(x*dy) / (sum(x*x) + e)] / sqrt(sum(x*x))
+ // 1. sum = sum(x*dy)
+ sum.device(*place) = (x * dy).sum(rdim);
+ // 2. dx = x * sum
+ dx.device(*place) = sum.reshape(rshape).broadcast(bcast) * x;
+ // 3. dx / (sum(x*x) + e)
+ // where, norm.pow(2) = sum(x*x) + e, which is calculated in forward.
+ dx.device(*place) = dx / norm.pow(2).broadcast(bcast);
+ // 4. [dy - dx] / sqrt(sum(x*x))
+ dx.device(*place) = (dy - dx) / norm.broadcast(bcast);
}
};
} // namespace operators
diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..61bb445e8b4c6a71e9b1a6a0bcf02a31ab271d0a
--- /dev/null
+++ b/paddle/fluid/operators/slice_op.cc
@@ -0,0 +1,130 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/slice_op.h"
+#include
+#include
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+class SliceOp : public framework::OperatorWithKernel {
+ public:
+ using framework::OperatorWithKernel::OperatorWithKernel;
+
+ void InferShape(framework::InferShapeContext *ctx) const override {
+ PADDLE_ENFORCE(ctx->HasInput("Input"),
+ "Input (Input) of slice op should not be null.");
+ PADDLE_ENFORCE(ctx->HasOutput("Out"),
+ "Output (Out) of slice op should not be null.");
+
+ auto in_dims = ctx->GetInputDim("Input");
+ PADDLE_ENFORCE(in_dims.size() < 7,
+ "The rank of input should be less than 7.");
+ framework::DDim out_dims(in_dims);
+ auto axes = ctx->Attrs().Get>("axes");
+ auto starts = ctx->Attrs().Get>("starts");
+ auto ends = ctx->Attrs().Get>("ends");
+
+ PADDLE_ENFORCE_EQ(starts.size(), ends.size());
+ PADDLE_ENFORCE_EQ(starts.size(), axes.size());
+ int dim_value, start, end;
+ for (size_t i = 0; i < axes.size(); ++i) {
+ dim_value = out_dims[axes[i]];
+ start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
+ end = ends[i] < 0 ? (ends[i] + dim_value) : ends[i];
+ start = std::max(start, 0);
+ end = std::max(end, 0);
+ start = std::min(start, dim_value);
+ end = std::min(end, dim_value);
+ start = std::min(start, end);
+ out_dims[axes[i]] = end - start;
+ }
+ ctx->SetOutputDim("Out", out_dims);
+ }
+
+ protected:
+ framework::OpKernelType GetExpectedKernelType(
+ const framework::ExecutionContext &ctx) const override {
+ return framework::OpKernelType(
+ framework::ToDataType(ctx.Input("Input")->type()),
+ ctx.GetPlace());
+ }
+};
+
+class SliceOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+ void Make() override {
+ AddInput("Input", "Tensor of data to extract slices from.");
+ AddOutput("Out", "Sliced data tensor.");
+
+ AddAttr>(
+ "axes",
+ "(list) Axes that `starts` and `ends` apply to. It's optional."
+ "If not present, will be treated as [0, 1, ..., len(`starts`) - 1].");
+ AddAttr>(
+ "starts",
+ "(list) Starting indices of corresponding axis in `axes`");
+ AddAttr>(
+ "ends",
+ "(list) Ending indices of corresponding axis in `axes`.");
+
+ AddComment(R"DOC(
+Slice Operator.
+
+Produces a slice of the input tensor along multiple axes. Similar to numpy:
+https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+Slice uses `axes`, `starts` and `ends` attributes to specify the start and
+end dimension for each axis in the list of axes, it uses this information
+to slice the input data tensor. If a negative value is passed for any of
+the start or end indices, it represents number of elements before the end
+of that dimension. If the value passed to start or end is larger than
+the n (the number of elements in this dimension), it represents n.
+For slicing to the end of a dimension with unknown size, it is recommended
+to pass in INT_MAX. If axes are omitted, they are set to [0, ..., ndim-1].
+
+ Example 1:
+ Given:
+ data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
+ axes = [0, 1]
+ starts = [1, 0]
+ ends = [2, 3]
+ Then:
+ result = [ [5, 6, 7], ]
+
+ Example 2:
+ Given:
+ data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
+ starts = [0, 1]
+ ends = [-1, 1000]
+ Then:
+ result = [ [2, 3, 4], ]
+)DOC");
+ }
+};
+
+} // namespace operators
+} // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker,
+ paddle::framework::EmptyGradOpMaker);
+
+REGISTER_OP_CPU_KERNEL(
+ slice, ops::SliceKernel,
+ ops::SliceKernel,
+ ops::SliceKernel,
+ ops::SliceKernel);
diff --git a/paddle/fluid/operators/slice_op.cu b/paddle/fluid/operators/slice_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..8c1767c70b19d1386af9610ef3405eb487a39878
--- /dev/null
+++ b/paddle/fluid/operators/slice_op.cu
@@ -0,0 +1,22 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/slice_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(
+ slice, ops::SliceKernel,
+ ops::SliceKernel,
+ ops::SliceKernel,
+ ops::SliceKernel);
diff --git a/paddle/fluid/operators/slice_op.h b/paddle/fluid/operators/slice_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba231aee176564b91a642912ce0b32bcdef8cfc1
--- /dev/null
+++ b/paddle/fluid/operators/slice_op.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include
+#include
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template
+class SliceKernel : public framework::OpKernel {
+ public:
+ void Compute(const framework::ExecutionContext& ctx) const override {
+ int rank = ctx.Input("Input")->dims().size();
+ switch (rank) {
+ case 1:
+ SliceCompute<1>(ctx);
+ break;
+ case 2:
+ SliceCompute<2>(ctx);
+ break;
+ case 3:
+ SliceCompute<3>(ctx);
+ break;
+ case 4:
+ SliceCompute<4>(ctx);
+ break;
+ case 5:
+ SliceCompute<5>(ctx);
+ break;
+ case 6:
+ SliceCompute<6>(ctx);
+ break;
+ }
+ }
+
+ private:
+ template
+ void SliceCompute(const framework::ExecutionContext& context) const {
+ auto& place =
+ *context.template device_context().eigen_device();
+ auto in = context.Input("Input");
+ auto out = context.Output("Out");
+ out->mutable_data(context.GetPlace());
+ auto out_dims = out->dims();
+ auto in_dims = in->dims();
+ auto axes = context.Attr>("axes");
+ auto starts = context.Attr>("starts");
+
+ auto offsets = Eigen::array();
+ auto extents = Eigen::array();
+ for (size_t i = 0; i < D; ++i) {
+ offsets[i] = 0;
+ extents[i] = out_dims[i];
+ }
+ int start;
+ for (size_t i = 0; i < axes.size(); ++i) {
+ start = starts[i];
+ if (start < 0) {
+ start = (start + in_dims[axes[i]]);
+ }
+ start = std::max(start, 0);
+ offsets[axes[i]] = start;
+ }
+ auto in_t =
+ framework::EigenTensor::From(
+ *in);
+ auto out_t =
+ framework::EigenTensor::From(
+ *out);
+ out_t.device(place) = in_t.slice(offsets, extents);
+ }
+};
+} // namespace operators
+} // namespace paddle
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index c88fbef63cf26c671246b15ea9872da0e7a92c1a..bd5c613f8cf794df5dfeb7517ed4350f9b3b6099 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -413,6 +413,9 @@ All parameter, weight, gradient are variables in Paddle.
py::class_(m, "Executor")
.def(py::init())
+#ifdef PADDLE_WITH_DISTRIBUTE
+ .def("complete", &Executor::Complete)
+#endif
.def("run",
(void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
Executor::Run);
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 55959197e7cd82253fb0c604604b4302ca0a3dc7..c6eef8683de8a4ab6c29940351ae914456a0c66f 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -181,6 +181,7 @@ function build() {
============================================
EOF
make clean
+ make -j `nproc`
make install -j `nproc`
}
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index b9ea74fc81e0eb9b52e9cd1e9af8cba005a10f21..c8cbb5ef00b7dac4ae3f833d3d98653e17bee2ab 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
-All layers just related to the neural network.
+All layers just related to the neural network.
"""
from ..layer_helper import LayerHelper
@@ -95,7 +95,6 @@ def fc(input,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
- use_cudnn=False,
use_mkldnn=False,
act=None,
is_test=False,
@@ -222,6 +221,7 @@ def embedding(input,
have two elements which indicate the size of the dictionary of
embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update.
+ is_distributed (bool): Whether to run lookup table from remote parameter server.
padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.
Otherwise the given :attr:`padding_idx` indicates padding the output
with zeros whenever lookup encounters it in :attr:`input`. If
@@ -654,8 +654,9 @@ def dynamic_gru(input,
:attr:`False`.
gate_activation(str): The activation for update gate and reset gate.
Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
- activation(str): The activation for candidate hidden state.
+ candidate_activation(str): The activation for candidate hidden state.
Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
+ h_0 (Variable): The hidden output of the first time step.
Returns:
Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
@@ -873,6 +874,13 @@ def cos_sim(X, Y):
"""
This function performs the cosine similarity between two tensors
X and Y and returns that as the output.
+
+ Args:
+ X (Variable): The input X.
+ Y (Variable): The input Y.
+
+ Returns:
+ Variable: the output of cosine(X, Y).
"""
helper = LayerHelper('cos_sim', **locals())
out = helper.create_tmp_variable(dtype=X.dtype)
@@ -899,15 +907,15 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
unchanged.
Args:
- x(variable): The input tensor.
- dropout_prob(float): Probability of setting units to zero.
- is_test(bool): A flag indicating whether it is in test phrase or not.
- seed(int): A Python integer used to create random seeds. If this
- parameter is set to None, a random seed is used.
- NOTE: If an integer seed is given, always the same output
- units will be dropped. DO NOT use a fixed seed in training.
- name(str|None): A name for this layer(optional). If set None, the layer
- will be named automatically.
+ x (Variable): The input tensor.
+ dropout_prob (float): Probability of setting units to zero.
+ is_test (bool): A flag indicating whether it is in test phrase or not.
+ seed (int): A Python integer used to create random seeds. If this
+ parameter is set to None, a random seed is used.
+ NOTE: If an integer seed is given, always the same output
+ units will be dropped. DO NOT use a fixed seed in training.
+ name (str|None): A name for this layer(optional). If set None, the layer
+ will be named automatically.
Returns:
Variable: A tensor variable.
@@ -1029,8 +1037,8 @@ def square_error_cost(input, label):
* :math:`Out`: Output value, same shape with :math:`X`.
Args:
- input(Variable): Input tensor, has predictions.
- label(Variable): Label tensor, has target labels.
+ input (Variable): Input tensor, has predictions.
+ label (Variable): Label tensor, has target labels.
Returns:
Variable: The tensor variable storing the element-wise squared error \
@@ -1059,6 +1067,7 @@ def square_error_cost(input, label):
return square_out
+@templatedoc()
def chunk_eval(input,
label,
chunk_scheme,
@@ -1067,6 +1076,18 @@ def chunk_eval(input,
"""
This function computes and outputs the precision, recall and
F1-score of chunk detection.
+
+ Args:
+ input (Variable): prediction output of the network.
+ label (Variable): label of the test data set.
+ chunk_scheme (str): ${chunk_scheme_comment}
+ num_chunk_types (int): ${num_chunk_types_comment}
+ excluded_chunk_types (list): ${excluded_chunk_types_comment}
+
+ Returns:
+ tuple: tuple containing: (precision, recall, f1_score,
+ num_infer_chunks, num_label_chunks,
+ num_correct_chunks)
"""
helper = LayerHelper("chunk_eval", **locals())
@@ -1099,6 +1120,7 @@ def chunk_eval(input,
num_correct_chunks)
+@templatedoc()
def sequence_conv(input,
num_filters,
filter_size=3,
@@ -1111,6 +1133,19 @@ def sequence_conv(input,
This function creates the op for sequence_conv, using the inputs and
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
+
+ Args:
+ input (Variable): ${x_comment}
+ num_filters (int): number of filters.
+ filter_size (int): the filter size (H and W).
+ filter_stride (int): stride of the filter.
+ padding (bool): if True, add paddings.
+ bias_attr (ParamAttr|None): attributes for bias
+ param_attr (ParamAttr|None): attributes for parameter
+ act (str): the activation type
+
+ Returns:
+ Variable: output of sequence_conv
"""
# FIXME(dzh) : want to unify the argument of python layer
@@ -1225,33 +1260,34 @@ def conv2d(input,
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
- input(Variable): The input image with [N, C, H, W] format.
- num_filters(int): The number of filter. It is as same as the output
- image channel.
- filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
- it must contain two integers, (filter_size_H, filter_size_W).
- Otherwise, the filter will be a square.
- stride(int|tuple): The stride size. If stride is a tuple, it must
- contain two integers, (stride_H, stride_W). Otherwise, the
- stride_H = stride_W = stride. Default: stride = 1.
- padding(int|tuple): The padding size. If padding is a tuple, it must
- contain two integers, (padding_H, padding_W). Otherwise, the
- padding_H = padding_W = padding. Default: padding = 0.
- dilation(int|tuple): The dilation size. If dilation is a tuple, it must
- contain two integers, (dilation_H, dilation_W). Otherwise, the
- dilation_H = dilation_W = dilation. Default: dilation = 1.
- groups(int): The groups number of the Conv2d Layer. According to grouped
- convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
- the first half of the filters is only connected to the first half
- of the input channels, while the second half of the filters is only
- connected to the second half of the input channels. Default: groups=1
- param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None
- bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
- use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
- library is installed. Default: True
- act(str): Activation type. Default: None
- name(str|None): A name for this layer(optional). If set None, the layer
- will be named automatically.
+ input (Variable): The input image with [N, C, H, W] format.
+ num_filters(int): The number of filter. It is as same as the output
+ image channel.
+ filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
+ it must contain two integers, (filter_size_H, filter_size_W).
+ Otherwise, the filter will be a square.
+ stride (int|tuple): The stride size. If stride is a tuple, it must
+ contain two integers, (stride_H, stride_W). Otherwise, the
+ stride_H = stride_W = stride. Default: stride = 1.
+ padding (int|tuple): The padding size. If padding is a tuple, it must
+ contain two integers, (padding_H, padding_W). Otherwise, the
+ padding_H = padding_W = padding. Default: padding = 0.
+ dilation (int|tuple): The dilation size. If dilation is a tuple, it must
+ contain two integers, (dilation_H, dilation_W). Otherwise, the
+ dilation_H = dilation_W = dilation. Default: dilation = 1.
+ groups (int): The groups number of the Conv2d Layer. According to grouped
+ convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
+ the first half of the filters is only connected to the first half
+ of the input channels, while the second half of the filters is only
+ connected to the second half of the input channels. Default: groups=1
+ param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
+ bias_attr (ParamAttr): Bias parameter for the Conv2d layer. Default: None
+ use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
+ library is installed. Default: True
+ use_mkldnn (bool): Use mkldnn kernels or not.
+ act (str): Activation type. Default: None
+ name (str|None): A name for this layer(optional). If set None, the layer
+ will be named automatically.
Returns:
Variable: The tensor variable storing the convolution and \
@@ -1409,7 +1445,7 @@ def sequence_pool(input, pool_type):
def sequence_first_step(input):
"""
- This funciton get the first step of sequence.
+ This function gets the first step of sequence.
.. code-block:: text
@@ -1442,7 +1478,7 @@ def sequence_first_step(input):
def sequence_last_step(input):
"""
- This funciton get the last step of sequence.
+ This function gets the last step of sequence.
.. code-block:: text
@@ -1486,6 +1522,22 @@ def pool2d(input,
"""
This function adds the operator for pooling in 2 dimensions, using the
pooling configurations mentioned in input parameters.
+
+ Args:
+ input (Variable): ${input_comment}
+ pool_size (int): ${ksize_comment}
+ pool_type (str): ${pooling_type_comment}
+ pool_stride (int): stride of the pooling layer.
+ pool_padding (int): padding size.
+ global_pooling (bool): ${global_pooling_comment}
+ use_cudnn (bool): ${use_cudnn_comment}
+ ceil_mode (bool): ${ceil_mode_comment}
+ use_mkldnn (bool): ${use_mkldnn_comment}
+ name (str): A name for this layer(optional). If set None, the layer
+ will be named automatically.
+
+ Returns:
+ Variable: output of pool2d layer.
"""
if pool_type not in ["max", "avg"]:
raise ValueError(
@@ -1543,6 +1595,25 @@ def batch_norm(input,
"""
This function helps create an operator to implement
the BatchNorm layer using the configurations from the input parameters.
+
+ Args:
+ input (Variable): the input variable.
+ act (str): activation type
+ is_test (bool): whether to run batch_norm as test mode.
+ momentum (float): momentum
+ epsilon (float): epsilon, default 1e-05
+ param_attr (ParamAttr|None): attributes for parameter
+ bias_attr (ParamAttr|None): attributes for bias
+ data_layout (str): data layout, default NCHW
+ in_place (bool): if True, do not create tmp variable
+ use_mkldnn (bool): ${use_mkldnn_comment}
+ name (str): The name of this layer. It is optional.
+ moving_mean_name (str): The name of moving mean variable name, optional.
+ moving_variance_name (str): The name of moving variance name, optional.
+ do_model_average_for_mean_and_var (bool):
+
+ Returns:
+ Variable: output of batch_norm layer.
"""
helper = LayerHelper('batch_norm', **locals())
dtype = helper.input_dtype()
@@ -1670,6 +1741,7 @@ def layer_norm(input,
bias_attr(ParamAttr|None): The parameter attribute for the learnable
bias :math:`b`.
act(str): Activation to be applied to the output of layer normalizaiton.
+ name (str): The name of this layer. It is optional.
Returns:
Variable: A tensor variable with the same shape as the input.
@@ -1721,6 +1793,17 @@ def layer_norm(input,
def beam_search_decode(ids, scores, name=None):
+ """
+ ${beam_search_decode}
+
+ Args:
+ ids (Variable): ${ids_comment}
+ scores (Variable): ${scores_comment}
+ name (str): The name of this layer. It is optional.
+
+ Returns:
+ tuple: a tuple of two output variables: sentence_ids, sentence_scores
+ """
helper = LayerHelper('beam_search_decode', **locals())
sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
sentence_scores = helper.create_tmp_variable(dtype=ids.dtype)
@@ -1796,46 +1879,46 @@ def conv2d_transpose(input,
W_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1
Args:
- input(Variable): The input image with [N, C, H, W] format.
- num_filters(int): The number of the filter. It is as same as the output
- image channel.
- output_size(int|tuple|None): The output image size. If output size is a
- tuple, it must contain two integers, (image_H, image_W). This
- parameter only works when filter_size is None.
- filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
- it must contain two integers, (filter_size_H, filter_size_W).
- Otherwise, the filter will be a square. None if use output size to
- calculate filter_size.
- padding(int|tuple): The padding size. If padding is a tuple, it must
- contain two integers, (padding_H, padding_W). Otherwise, the
- padding_H = padding_W = padding. Default: padding = 0.
- stride(int|tuple): The stride size. If stride is a tuple, it must
- contain two integers, (stride_H, stride_W). Otherwise, the
- stride_H = stride_W = stride. Default: stride = 1.
- dilation(int|tuple): The dilation size. If dilation is a tuple, it must
- contain two integers, (dilation_H, dilation_W). Otherwise, the
- dilation_H = dilation_W = dilation. Default: dilation = 1.
- groups(int): The groups number of the Conv2d transpose layer. Inspired by
- grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
- when group=2, the first half of the filters is only connected to the
- first half of the input channels, while the second half of the
- filters is only connected to the second half of the input channels.
- Default: groups=1
- param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer.
- Default: None
- bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
- use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
- library is installed. Default: True
- act(str): Activation type. Default: None
- name(str|None): A name for this layer(optional). If set None, the layer
- will be named automatically.
+ input(Variable): The input image with [N, C, H, W] format.
+ num_filters(int): The number of the filter. It is as same as the output
+ image channel.
+ output_size(int|tuple|None): The output image size. If output size is a
+ tuple, it must contain two integers, (image_H, image_W). This
+ parameter only works when filter_size is None.
+ filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
+ it must contain two integers, (filter_size_H, filter_size_W).
+ Otherwise, the filter will be a square. None if use output size to
+ calculate filter_size.
+ padding(int|tuple): The padding size. If padding is a tuple, it must
+ contain two integers, (padding_H, padding_W). Otherwise, the
+ padding_H = padding_W = padding. Default: padding = 0.
+ stride(int|tuple): The stride size. If stride is a tuple, it must
+ contain two integers, (stride_H, stride_W). Otherwise, the
+ stride_H = stride_W = stride. Default: stride = 1.
+ dilation(int|tuple): The dilation size. If dilation is a tuple, it must
+ contain two integers, (dilation_H, dilation_W). Otherwise, the
+ dilation_H = dilation_W = dilation. Default: dilation = 1.
+ groups(int): The groups number of the Conv2d transpose layer. Inspired by
+ grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
+ when group=2, the first half of the filters is only connected to the
+ first half of the input channels, while the second half of the
+ filters is only connected to the second half of the input channels.
+ Default: groups=1
+ param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer.
+ Default: None
+ bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+ use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+ library is installed. Default: True
+ act(str): Activation type. Default: None
+ name(str|None): A name for this layer(optional). If set None, the layer
+ will be named automatically.
Returns:
- Variable: The tensor variable storing the convolution transpose result.
+ Variable: The tensor variable storing the convolution transpose result.
Raises:
- ValueError: If the shapes of input, filter_size, stride, padding and
- groups mismatch.
+ ValueError: If the shapes of input, filter_size, stride, padding and
+ groups mismatch.
Examples:
.. code-block:: python
@@ -1972,6 +2055,17 @@ def sequence_expand(x, y, ref_level=-1, name=None):
def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0):
'''
This function implements the beam search algorithm.
+
+ Args:
+ pre_ids (Variable): ${pre_ids_comment}
+ ids (Variable): ${ids_comment}
+ scores (Variable): ${scores_comment}
+ beam_size (int): ${beam_size_comment}
+ end_id (int): ${end_id_comment}
+ level (int): ${level_comment}
+
+ Returns:
+ tuple: a tuple of beam_search output variables: selected_ids, selected_scores
'''
helper = LayerHelper('beam_search', **locals())
score_type = scores.dtype
@@ -2467,19 +2561,21 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
The l2 normalize layer normalizes `x` along dimension `axis` using an L2
norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
- output = x / sqrt(max(sum(x**2), epsilon))
+ .. math::
+ y = \frac{x}{ \sqrt{\sum {x^2} + \epsilon }}
For `x` with more dimensions, this layer independently normalizes each 1-D
slice along dimension `axis`.
Args:
- x(Variable|list): The input tensor to l2_normalize layer.
- axis(int): Dimension along which to normalize the input.
- epsilon(float): A lower bound value for `x`'s l2 norm. sqrt(epsilon) will
- be used as the divisor if the l2 norm of `x` is less than
- sqrt(epsilon).
- name(str|None): A name for this layer(optional). If set None, the layer
- will be named automatically.
+ x(Variable|list): The input tensor to l2_normalize layer.
+ axis(int): The axis on which to apply normalization. If `axis < 0`,
+ the dimension to normalization is rank(X) + axis. -1 is the
+ last dimension.
+ epsilon(float): The epsilon value is used to avoid division by zero,
+ the default value is 1e-12.
+ name(str|None): A name for this layer(optional). If set None, the layer
+ will be named automatically.
Returns:
@@ -2498,46 +2594,17 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
axis = 0
helper = LayerHelper("l2_normalize", **locals())
- square = helper.create_tmp_variable(dtype=x.dtype)
- helper.append_op(type="square", inputs={"X": x}, outputs={"Out": square})
-
- reduced_sum = helper.create_tmp_variable(dtype=x.dtype)
+ out = helper.create_tmp_variable(dtype=x.dtype)
+ norm = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
- type="reduce_sum",
- inputs={"X": square},
- outputs={"Out": reduced_sum},
+ type="norm",
+ inputs={"X": x},
+ outputs={"Out": out,
+ "Norm": norm},
attrs={
- "dim": [1] if axis is None else [axis],
- "keep_dim": True,
- "reduce_all": False
+ "axis": 1 if axis is None else axis,
+ "epsilon": epsilon,
})
-
- # TODO(caoying) A lower bound value epsilon for the norm is needed to
- # imporve the numeric stability of reciprocal. This requires a maximum_op.
- rsquare = helper.create_tmp_variable(dtype=x.dtype)
- helper.append_op(
- type="reciprocal", inputs={"X": reduced_sum}, outputs={"Out": rsquare})
-
- # TODO(caoying) the current elementwise_mul operator does not support a
- # general broadcast rule which broadcasts input(Y) to have the same
- # dimension with Input(X) starting from a specified dimension. So this
- # exanpsion is requred. Once a general broadcast rule is spported, this
- # expanding canbe removed.
- rsquare_expanded = helper.create_tmp_variable(dtype=x.dtype)
- expand_times = [1] * len(x.shape)
- expand_times[axis] = int(x.shape[axis])
- helper.append_op(
- type="expand",
- inputs={"X": rsquare},
- outputs={"Out": rsquare_expanded},
- attrs={"expand_times": expand_times})
-
- out = helper.create_tmp_variable(dtype=x.dtype)
- helper.append_op(
- type="elementwise_mul",
- inputs={"X": x,
- "Y": rsquare_expanded},
- outputs={"Out": out})
return out
@@ -2721,16 +2788,13 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None,
the edit distance will be divided by the length of reference string.
Args:
-
input(Variable): The indices for hypothesis strings.
-
label(Variable): The indices for reference strings.
-
normalized(bool): Indicated whether to normalize the edit distance by
the length of reference string.
-
ignored_tokens(list of int): Tokens that should be removed before
calculating edit distance.
+ name (str): The name of this layer. It is optional.
Returns:
Variable: sequence-to-sequence edit distance in shape [batch_size, 1].
@@ -2820,10 +2884,10 @@ def ctc_greedy_decoder(input, blank, name=None):
where Lp is the sum of all input sequences' length and
num_classes is the true number of classes. (not
including the blank label).
-
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in thehalf-opened
interval [0, num_classes + 1).
+ name (str): The name of this layer. It is optional.
Returns:
Variable: CTC greedy decode result. If all the sequences in result were
@@ -2860,23 +2924,23 @@ def warpctc(input, label, blank=0, norm_by_times=False):
input tensor.
Args:
- input(Variable): (LodTensor, default: LoDTensor),
- the unscaled probabilities of variable-length sequences,
- which is a 2-D Tensor with LoD information.
- It's shape is [Lp, num_classes + 1], where Lp is the sum of all input
- sequences' length and num_classes is the true number of classes.
- (not including the blank label).
- label(Variable): (LodTensor, default: LoDTensor), the ground truth
- of variable-length sequence, which is a 2-D Tensor with LoD
- information. It is of the shape [Lg, 1], where Lg is th sum of
- all labels' length.
- blank: (int, default: 0), the blank label index of Connectionist
- Temporal Classification (CTC) loss, which is in the
- half-opened interval [0, num_classes + 1).
- norm_by_times: (bool, default: false), whether to normalize
- the gradients by the number of time-step, which is also the
- sequence's length. There is no need to normalize the gradients
- if warpctc layer was follewed by a mean_op.
+ input(Variable): (LodTensor, default: LoDTensor),
+ the unscaled probabilities of variable-length sequences,
+ which is a 2-D Tensor with LoD information.
+ It's shape is [Lp, num_classes + 1], where Lp is the sum of all input
+ sequences' length and num_classes is the true number of classes.
+ (not including the blank label).
+ label(Variable): (LodTensor, default: LoDTensor), the ground truth
+ of variable-length sequence, which is a 2-D Tensor with LoD
+ information. It is of the shape [Lg, 1], where Lg is the sum of
+ all labels' length.
+ blank (int): default 0, the blank label index of Connectionist
+ Temporal Classification (CTC) loss, which is in the
+ half-opened interval [0, num_classes + 1).
+ norm_by_times (bool): default false, whether to normalize
+ the gradients by the number of time-step, which is also the
+ sequence's length. There is no need to normalize the gradients
+ if warpctc layer was followed by a mean_op.
Returns:
Variable: The Connectionist Temporal Classification (CTC) loss,
@@ -2935,9 +2999,9 @@ def sequence_reshape(input, new_dim):
no remainder for each sequence.
Args:
- input (Variable): (LodTensor, default: LoDTensor), a 2-D LoDTensor
- with shape being [N, M] where M for dimension.
- new_dim (int): New dimension which the input LoDTensor is reshaped to.
+ input (Variable): (LodTensor, default: LoDTensor), a 2-D LoDTensor
+ with shape being [N, M] where M for dimension.
+ new_dim (int): New dimension which the input LoDTensor is reshaped to.
Returns:
Variable: Reshaped LoDTensor according to new dimension.
@@ -2959,7 +3023,10 @@ def sequence_reshape(input, new_dim):
return out
-@autodoc()
+# FIXME(wuyi): let docstring_checker.py understand @autodoc.
+# For now, the comments in c++ use types like Tensor, but in python side
+# the type is often "Variable", and arguments may vary.
+@templatedoc(op_type="nce")
def nce(input,
label,
num_total_classes,
@@ -2967,6 +3034,21 @@ def nce(input,
param_attr=None,
bias_attr=None,
num_neg_samples=None):
+ """
+ ${comment}
+
+ Args:
+ input (Variable): input variable.
+ label (Variable): label.
+ num_total_classes (int): ${num_total_classes_comment}
+ sample_weight (int): ${sample_weight_comment}
+ param_attr (ParamAttr|None): attributes for parameter
+ bias_attr (ParamAttr|None): attributes for bias
+ num_neg_samples (int): ${num_neg_samples_comment}
+
+ Returns:
+ Variable: output of nce layer.
+ """
helper = LayerHelper('nce', **locals())
assert isinstance(input, Variable)
dim = input.shape[1]
@@ -3024,8 +3106,9 @@ def transpose(x, perm, name=None):
perm[i]-th dimension of `input`.
Args:
- input (Variable): (Tensor), A Tensor.
- perm (list): A permutation of the dimensions of `input`.
+ x (Variable): The input Tensor.
+ perm (list): A permutation of the dimensions of `input`.
+ name (str): The name of this layer. It is optional.
Returns:
Variable: A transposed Tensor.
@@ -3258,9 +3341,9 @@ def multiplex(inputs, index):
row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`.
Args:
- inputs (list): A list of variables to gather from. All variables have the
+ inputs (list): A list of variables to gather from. All variables have the
same shape and the rank is at least 2.
- index (Variable): Tensor, index variable which is a 2-D tensor
+ index (Variable): Tensor, index variable which is a 2-D tensor
with shape [M, 1] where M is the batch size.
Returns:
@@ -3459,7 +3542,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
begin(int): The first value of this counter.
step(int): The increment step between each execution.
- Returns(Variable): The global run counter.
+ Returns:
+ Variable: The global run counter.
"""
helper = LayerHelper('global_step_counter')
if counter_name is None:
@@ -3520,7 +3604,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
the corresponding dimension of x.
Args:
- input(variable): The input tensor.
+ x(variable): The input tensor.
shape(list): The new shape. At most one dimension of the new shape can
be -1.
actual_shape(variable): An optional input. If provided, reshape
@@ -3532,8 +3616,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
inplace(bool): If this flag is set true, a new output tensor is created
whose data is copied from input x, otherwise the output
shares data with input without copying.
+ name (str): The name of this layer. It is optional.
- Returns(variable): The output tensor.
+ Returns:
+ Variable: The output tensor.
Examples:
.. code-block:: python
@@ -4054,7 +4140,6 @@ def resize_bilinear(input, out_shape=None, scale=None, name=None):
name(str|None): The output variable name.
Returns:
-
${out_comment}.
"""
@@ -4073,6 +4158,7 @@ def image_resize_short(input, out_short_len, resample='BILINEAR'):
This is a 4-D tensor of the shape
(num_batches, channels, in_h, in_w).
out_short_len(int): The length of output images' short edge.
+ resample (str): resample method, default: BILINEAR.
Returns:
out (Variable): The output is a 4-D tensor of the shape
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 3260f81e9edcd9ed83e98a681c43a5d9dbfd1312..98f169e8f0881fbba6aecb45b43a52c8fd51132d 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -71,6 +71,7 @@ __all__ = [
'cumsum',
'scatter',
'sum',
+ 'slice',
'polygon_box_transform',
'shape',
'maxout',
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 8b0ebe3cf52bf5b4514eacbd5d1bdd7c7a9b8b67..f8cf6f4e2d25c0c03a3a73dca8e6bc1990b3b78b 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -387,6 +387,12 @@ class TestBook(unittest.TestCase):
self.assertIsNotNone(output)
print(str(program))
+ def test_l2_normalize(self):
+ program = Program()
+ with program_guard(program):
+ x = layers.data(name='x', shape=[8, 7, 10], dtype="float32")
+ output = layers.l2_normalize(x, axis=1)
+
def test_maxout(self):
program = Program()
with program_guard(program):
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index 6feda175fb537db894ac7f19e22297f6062a4d61..108a665f37f5cd652ec83f784a56ca52e6b49fe8 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -17,44 +17,23 @@ import numpy as np
from op_test import OpTest
-def norm(input, scale, epsilon):
- s0, s1, s2, s3 = input.shape
- x_square = input * input
- for i in xrange(s0):
- input_batch = input[i:i + 1, :, :, :]
- input_batch = input_batch.reshape(s1, s2 * s3)
- x_square_batch = x_square[i:i + 1, :, :, :]
- x_square_batch = x_square_batch.reshape(s1, s2 * s3)
- square_colsum = x_square_batch.sum(axis=0) + epsilon
- tmp = pow(square_colsum, 0.5)
- tmp = np.reciprocal(tmp)
- tmp_tile = np.tile(tmp, s1)
- tmp_tile = tmp_tile.reshape(s1, s2 * s3)
- scale_tile = np.tile(scale, (1, s2 * s3))
- scale_tile = scale_tile.reshape(s1, s2 * s3)
- out_batch = input_batch * tmp_tile * scale_tile
- out_batch = out_batch.reshape(1, s1, s2, s3)
- if i == 0:
- out = out_batch
- else:
- out = np.concatenate((out, out_batch), 0)
- out.reshape(s0, s1, s2, s3)
- return out
+def l2_norm(x, axis, epsilon):
+ x2 = x**2
+ s = np.sum(x2, axis=axis, keepdims=True)
+ r = np.sqrt(s + epsilon)
+ y = x / np.broadcast_to(r, x.shape)
+ return y, r
class TestNormOp(OpTest):
def setUp(self):
self.op_type = "norm"
self.init_test_case()
- input = np.random.random(self.shape).astype("float32")
- scale = np.array([10, 10, 10])
- self.inputs = {
- 'X': input.astype('float32'),
- 'Scale': scale.astype('float32')
- }
- self.attrs = {'epsilon': self.epsilon}
- output = norm(input, scale, self.epsilon)
- self.outputs = {'Out': output.astype('float32')}
+ x = np.random.random(self.shape).astype("float64")
+ y, norm = l2_norm(x, self.axis, self.epsilon)
+ self.inputs = {'X': x}
+ self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
+ self.outputs = {'Out': y, 'Norm': norm}
def test_check_output(self):
self.check_output()
@@ -63,8 +42,23 @@ class TestNormOp(OpTest):
self.check_grad(['X'], 'Out')
def init_test_case(self):
- self.shape = [2, 3, 2, 2]
- self.epsilon = 1e-6
+ self.shape = [2, 3, 4, 4]
+ self.axis = 1
+ self.epsilon = 1e-8
+
+
+class TestNormOp2(TestNormOp):
+ def init_test_case(self):
+ self.shape = [5, 3, 9, 7]
+ self.axis = 0
+ self.epsilon = 1e-8
+
+
+class TestNormOp3(TestNormOp):
+ def init_test_case(self):
+ self.shape = [5, 3, 2, 7]
+ self.axis = -1
+ self.epsilon = 1e-8
if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
index ef34893943d8f6bf91b1eb14378e463c178de84d..198c68866d399023c51c2a43b588aa8ec49c3c9a 100644
--- a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
+++ b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
@@ -70,8 +70,9 @@ class TestNormalization(unittest.TestCase):
def l2_normalize(self, data, axis, epsilon):
""" Compute the groundtruth.
"""
- output = data * np.reciprocal(
- np.sum(np.square(data), axis=axis, keepdims=True))
+ output = data / np.broadcast_to(
+ np.sqrt(np.sum(np.square(data), axis=axis, keepdims=True)),
+ data.shape)
return output
def test_l2_normalize(self):
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a48bce3bb7c74551a365fd471f6869b128babac
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestSliceOp(OpTest):
+ def setUp(self):
+ self.op_type = "slice"
+ self.config()
+ self.inputs = {'Input': self.input}
+ self.outputs = {'Out': self.out}
+ self.attrs = {
+ 'axes': self.axes,
+ 'starts': self.starts,
+ 'ends': self.ends
+ }
+
+ def config(self):
+ self.input = np.random.random([3, 4, 5, 6]).astype("float32")
+ self.starts = [1, 0, 2]
+ self.ends = [3, 3, 4]
+ self.axes = [0, 1, 2]
+ self.out = self.input[1:3, 0:3, 2:4, :]
+
+ def test_check_output(self):
+ self.check_output()
+
+
+class TestCase1(TestSliceOp):
+ def config(self):
+ self.input = np.random.random([3, 4, 5, 6]).astype("float32")
+ self.starts = [-3, 0, 2]
+ self.ends = [3, 100, -1]
+ self.axes = [0, 1, 2]
+ self.out = self.input[-3:3, 0:100, 2:-1, :]
+
+
+class TestCase2(TestSliceOp):
+ def config(self):
+ self.input = np.random.random([3, 4, 5, 6]).astype("float32")
+ self.starts = [-3, 0, 2]
+ self.ends = [3, 100, -1]
+ self.axes = [0, 1, 3]
+ self.out = self.input[-3:3, 0:100, :, 2:-1]
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 5ec5b41544c686f88c95da36cedaa682f516caae..9c604170b8b53c9cbcf39b4978ae60ccad84648c 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -515,35 +515,38 @@ class DistributeTranspiler:
grad_to_block_id, None)
# process distributed lookup_table
- prefetch_block = None
+ prefetch_var_name_to_block_id = []
if self.has_distributed_lookup_table:
pserver_index = self.pserver_endpoints.index(endpoint)
table_opt_block = self._create_table_optimize_block(
pserver_index, pserver_program, pre_block_idx, grad_to_block_id)
- prefetch_block = self._create_prefetch_block(
+ prefetch_var_name_to_block_id = self._create_prefetch_block(
pserver_index, pserver_program, table_opt_block)
# NOTE: if has_distributed_lookup_table is False, then prefetch_block will
# not be executed, so it's safe to use optimize_block to hold the place
if self.has_distributed_lookup_table:
- assert prefetch_block is not None
+ assert len(prefetch_var_name_to_block_id) > 0
else:
- assert prefetch_block is None
- prefetch_block = pserver_program.global_block()
+ assert len(prefetch_var_name_to_block_id) == 0
+
+ attrs = {
+ "OptimizeBlock": pserver_program.block(1),
+ "endpoint": endpoint,
+ "Fanin": self.trainer_num,
+ "sync_mode": self.sync_mode,
+ "grad_to_block_id": grad_to_block_id
+ }
+ if len(prefetch_var_name_to_block_id) > 0:
+ attrs['prefetch_var_name_to_block_id'] \
+ = prefetch_var_name_to_block_id
# step5 append the listen_and_serv op
pserver_program.global_block().append_op(
type="listen_and_serv",
inputs={'X': recv_inputs},
outputs={},
- attrs={
- "OptimizeBlock": pserver_program.block(1),
- "endpoint": endpoint,
- "Fanin": self.trainer_num,
- "PrefetchBlock": prefetch_block,
- "sync_mode": self.sync_mode,
- "grad_to_block_id": grad_to_block_id
- })
+ attrs=attrs)
pserver_program.sync_with_cpp()
return pserver_program
@@ -608,8 +611,15 @@ class DistributeTranspiler:
def _replace_lookup_table_op_with_prefetch(self, program,
pserver_endpoints):
# 1. replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op
- self.prefetch_input_vars = None
- self.prefetch_output_vars = None
+ # self.all_prefetch_input_vars =
+ # [[var0_prefetch_in_pserver0, var0_prefetch_in_pserver1]
+ # [var1_prefetch_in_pserver0, var1_prefetch_in_pserver1]]
+ self.all_prefetch_input_vars = []
+
+        # self.all_prefetch_output_vars =
+        # [[var0_prefetch_out_pserver0, var0_prefetch_out_pserver1]
+        # [var1_prefetch_out_pserver0, var1_prefetch_out_pserver1]]
+ self.all_prefetch_output_vars = []
continue_search_lookup_table_op = True
while continue_search_lookup_table_op:
@@ -623,18 +633,19 @@ class DistributeTranspiler:
ids_name = op.input("Ids")
out_name = op.output("Out")
- if self.prefetch_input_vars is None:
- ids_var = program.global_block().vars[ids_name[0]]
- self.prefetch_input_vars = self.create_splited_vars(
- source_var=ids_var,
- block=program.global_block(),
- tag="_prefetch_in_")
- if self.prefetch_output_vars is None:
- out_var = program.global_block().vars[out_name[0]]
- self.prefetch_output_vars = self.create_splited_vars(
- source_var=out_var,
- block=program.global_block(),
- tag="_prefetch_out_")
+ ids_var = program.global_block().vars[ids_name[0]]
+ prefetch_input_vars = self.create_splited_vars(
+ source_var=ids_var,
+ block=program.global_block(),
+ tag="_prefetch_in_")
+ self.all_prefetch_input_vars.append(prefetch_input_vars)
+
+ out_var = program.global_block().vars[out_name[0]]
+ prefetch_output_vars = self.create_splited_vars(
+ source_var=out_var,
+ block=program.global_block(),
+ tag="_prefetch_out_")
+ self.all_prefetch_output_vars.append(prefetch_output_vars)
# insert split_ids_op
program.global_block().insert_op(
@@ -646,14 +657,14 @@ class DistributeTranspiler:
for varname in ids_name
]
},
- outputs={"Out": self.prefetch_input_vars})
+ outputs={"Out": prefetch_input_vars})
# insert prefetch_op
program.global_block().insert_op(
index=lookup_table_op_index + 1,
type="prefetch",
- inputs={'X': self.prefetch_input_vars},
- outputs={"Out": self.prefetch_output_vars},
+ inputs={'X': prefetch_input_vars},
+ outputs={"Out": prefetch_output_vars},
attrs={
"epmap": pserver_endpoints,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
@@ -668,7 +679,7 @@ class DistributeTranspiler:
program.global_block().vars[varname]
for varname in ids_name
],
- 'X': self.prefetch_output_vars
+ 'X': prefetch_output_vars
},
outputs={
"Out": [
@@ -714,30 +725,34 @@ class DistributeTranspiler:
optimize_block):
# STEP: create prefetch block
table_var = pserver_program.global_block().vars[self.table_name]
- prefetch_block = pserver_program.create_block(optimize_block.idx)
- trainer_ids = self.prefetch_input_vars[pserver_index]
- pserver_ids = pserver_program.global_block().create_var(
- name=trainer_ids.name,
- type=trainer_ids.type,
- shape=trainer_ids.shape,
- dtype=trainer_ids.dtype)
- trainer_out = self.prefetch_output_vars[pserver_index]
- pserver_out = pserver_program.global_block().create_var(
- name=trainer_out.name,
- type=trainer_out.type,
- shape=trainer_out.shape,
- dtype=trainer_out.dtype)
- prefetch_block.append_op(
- type="lookup_sparse_table",
- inputs={'Ids': pserver_ids,
- "W": table_var},
- outputs={"Out": pserver_out},
- attrs={
- "is_sparse": True, # has no effect on lookup_table op
- "is_distributed": True,
- "padding_idx": -1
- })
- return prefetch_block
+ prefetch_var_name_to_block_id = []
+ for index in range(len(self.all_prefetch_input_vars)):
+ prefetch_block = pserver_program.create_block(optimize_block.idx)
+ trainer_ids = self.all_prefetch_input_vars[index][pserver_index]
+ pserver_ids = pserver_program.global_block().create_var(
+ name=trainer_ids.name,
+ type=trainer_ids.type,
+ shape=trainer_ids.shape,
+ dtype=trainer_ids.dtype)
+ trainer_out = self.all_prefetch_output_vars[index][pserver_index]
+ pserver_out = pserver_program.global_block().create_var(
+ name=trainer_out.name,
+ type=trainer_out.type,
+ shape=trainer_out.shape,
+ dtype=trainer_out.dtype)
+ prefetch_block.append_op(
+ type="lookup_sparse_table",
+ inputs={'Ids': pserver_ids,
+ "W": table_var},
+ outputs={"Out": pserver_out},
+ attrs={
+ "is_sparse": True, # has no effect on lookup_table op
+ "is_distributed": True,
+ "padding_idx": -1
+ })
+ prefetch_var_name_to_block_id.append(trainer_ids.name + ":" + str(
+ prefetch_block.idx))
+ return prefetch_var_name_to_block_id
def _create_table_optimize_block(self, pserver_index, pserver_program,
pre_block_idx, grad_to_block_id):
diff --git a/tools/codestyle/docstring_checker.py b/tools/codestyle/docstring_checker.py
index 48100e5bf989520043b5ca372b02883faea8a9fd..54a690462699651d3e14f9b24383df01a9740336 100644
--- a/tools/codestyle/docstring_checker.py
+++ b/tools/codestyle/docstring_checker.py
@@ -126,9 +126,10 @@ class DocstringChecker(BaseChecker):
'W9002':
('Doc string does not end with "." period', symbol + "-end-with",
'Used when a doc string does not end with a period'),
- 'W9003': ('All args with their types must be mentioned in doc string',
- symbol + "-with-all-args",
- 'Used when not all arguments are in the doc string '),
+ 'W9003':
+ ('All args with their types must be mentioned in doc string %s',
+ symbol + "-with-all-args",
+ 'Used when not all arguments are in the doc string '),
'W9005': ('Missing docstring or docstring is too short',
symbol + "-missing", 'Add docstring longer >=10'),
'W9006': ('Docstring indent error, use 4 space for indent',
@@ -178,6 +179,8 @@ class DocstringChecker(BaseChecker):
self.indent_style(node)
def missing_doc_string(self, node):
+ if node.name.startswith("__") or node.name.startswith("_"):
+ return True
if node.tolineno - node.fromlineno <= 10:
return True
@@ -199,12 +202,16 @@ class DocstringChecker(BaseChecker):
doc = node.doc
lines = doc.splitlines()
+ line_num = 0
for l in lines:
+            line_num += 1
+            if line_num == 1:
+                continue
             cur_indent = len(l) - len(l.lstrip())
             if cur_indent % indent != 0:
                 self.add_message('W9006', node=node, line=node.fromlineno)
                 return False
return True
@@ -320,15 +327,19 @@ class DocstringChecker(BaseChecker):
return True
parsed_args = doc.args
+ args_not_documented = set(args) - set(parsed_args)
if len(args) > 0 and len(parsed_args) <= 0:
- print "debug:parsed args: ", parsed_args
- self.add_message('W9003', node=node, line=node.fromlineno)
+ self.add_message(
+ 'W9003',
+ node=node,
+ line=node.fromlineno,
+ args=list(args_not_documented))
return False
for t in args:
if t not in parsed_args:
- print t, " with (type) not in ", parsed_args
- self.add_message('W9003', node=node, line=node.fromlineno)
+ self.add_message(
+ 'W9003', node=node, line=node.fromlineno, args=[t, ])
return False
return True
diff --git a/tools/codestyle/pylint_pre_commit.hook b/tools/codestyle/pylint_pre_commit.hook
index e7c92ba671e0eb778b2ab5447bea7c4b14fe761b..150a3f5666bd39d30b7e6518e58a14fb5fe2f14b 100755
--- a/tools/codestyle/pylint_pre_commit.hook
+++ b/tools/codestyle/pylint_pre_commit.hook
@@ -7,13 +7,13 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PYTHONPATH=$DIR:$PYTHONPATH
# The trick to remove deleted files: https://stackoverflow.com/a/2413151
-for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do
+for file in $(git diff --name-status | awk '$1 != "D" {print $2}'); do
pylint --disable=all --load-plugins=docstring_checker \
--enable=doc-string-one-line,doc-string-end-with,doc-string-with-all-args,doc-string-triple-quotes,doc-string-missing,doc-string-indent-error,doc-string-with-returns,doc-string-with-raises $file;
TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?);
done
-#exit $TOTAL_ERRORS
+exit $TOTAL_ERRORS
#For now, just warning:
-exit 0
+#exit 0