Commit 3e6e5c92 authored by: F fengjiayi

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into refactor_registry_macro

...@@ -74,13 +74,13 @@ Every time PaddlePaddle releases a new version, the corresponding production image is published, as well as ...
.. code-block:: bash

-    docker run -it --rm paddlepaddle/paddle:0.10.0-dev /bin/bash
+    docker run -it --rm -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /bin/bash

Alternatively, the container can be run as a background process:

.. code-block:: bash

-    docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:0.10.0-dev
+    docker run -d -p 2202:22 -p 8888:8888 -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /usr/sbin/sshd -D

Then SSH into the container with the password :code:`root`:
......
...@@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context) ...@@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context)
cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_library(lod_tensor SRCS lod_tensor.cc details/lod_tensor.cc DEPS ddim place tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
cc_test(variable_test SRCS variable_test.cc) cc_test(variable_test SRCS variable_test.cc)
...@@ -15,23 +15,19 @@ cc_test(variable_test SRCS variable_test.cc) ...@@ -15,23 +15,19 @@ cc_test(variable_test SRCS variable_test.cc)
cc_library(scope SRCS scope.cc) cc_library(scope SRCS scope.cc)
cc_test(scope_test SRCS scope_test.cc DEPS scope) cc_test(scope_test SRCS scope_test.cc DEPS scope)
proto_library(attribute_proto SRCS attribute.proto) proto_library(framework_proto SRCS framework.proto)
proto_library(op_proto SRCS op_proto.proto DEPS attribute_proto)
proto_library(op_desc SRCS op_desc.proto DEPS attribute_proto)
cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf)
cc_library(attribute SRCS attribute.cc DEPS op_desc op_proto) cc_library(attribute SRCS attribute.cc DEPS framework_proto)
cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope attribute) cc_library(operator SRCS operator.cc DEPS framework_proto device_context tensor scope attribute)
cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS op_proto operator) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator)
cc_library(op_registry SRCS op_registry.cc DEPS op_desc grad_op_builder) cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder)
cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
py_proto_compile(framework_py_proto SRCS attribute.proto op_proto.proto op_desc.proto) py_proto_compile(framework_py_proto SRCS framework.proto)
# Generate an empty __init__.py to make framework_py_proto as a valid python module. # Generate an empty __init__.py to make framework_py_proto as a valid python module.
add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(framework_py_proto framework_py_proto_init) add_dependencies(framework_py_proto framework_py_proto_init)
......
...@@ -44,7 +44,7 @@ AttrType AttrTypeID<std::vector<std::string>>() { ...@@ -44,7 +44,7 @@ AttrType AttrTypeID<std::vector<std::string>>() {
return STRINGS; return STRINGS;
} }
Attribute GetAttrValue(const AttrDesc& attr_desc) { Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
switch (attr_desc.type()) { switch (attr_desc.type()) {
case paddle::framework::AttrType::INT: { case paddle::framework::AttrType::INT: {
return attr_desc.i(); return attr_desc.i();
......
...@@ -20,8 +20,7 @@ limitations under the License. */ ...@@ -20,8 +20,7 @@ limitations under the License. */
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include "paddle/framework/attribute.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_desc.pb.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/variant.h" #include "paddle/platform/variant.h"
...@@ -37,7 +36,7 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap; ...@@ -37,7 +36,7 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap;
template <typename T> template <typename T>
AttrType AttrTypeID(); AttrType AttrTypeID();
Attribute GetAttrValue(const AttrDesc& attr_desc); Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
// check whether a value(attribute) fit a certain limit // check whether a value(attribute) fit a certain limit
template <typename T> template <typename T>
......
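To make the new attribute path concrete, here is a minimal sketch that builds an OpDesc::Attr by hand and converts it with GetAttrValue. The attribute name and value are invented, the FLOAT branch is assumed to mirror the INT branch shown above, and reading the result with boost::get assumes Attribute is the boost variant pulled in through paddle/platform/variant.h:

    #include "paddle/framework/attribute.h"

    // Sketch only: "scale" and 0.5f are illustrative values.
    float ScaleFromDesc() {
      paddle::framework::OpDesc::Attr attr_desc;
      attr_desc.set_name("scale");
      attr_desc.set_type(paddle::framework::AttrType::FLOAT);
      attr_desc.set_f(0.5f);
      paddle::framework::Attribute attr =
          paddle::framework::GetAttrValue(attr_desc);
      return boost::get<float>(attr);  // assumes Attribute is a boost::variant
    }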
...@@ -21,15 +21,25 @@ ...@@ -21,15 +21,25 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
static bool AllInSet(const std::vector<std::string>& names, template <typename Map, typename T>
const std::string& suffix, static void ForEachVarName(Map& names, T callback) {
const std::unordered_set<std::string>& set) {
for (auto& name : names) { for (auto& name : names) {
if (set.find(name + suffix) == set.end()) { for (auto& n : name.second) {
return false; if (callback(n)) return;
} }
} }
return true; }
// return whether all of the names + suffix are in the set
static bool AllInSet(
const std::map<std::string, std::vector<std::string>>& names,
const std::string& suffix, const std::unordered_set<std::string>& set) {
bool all_in_set = true;
ForEachVarName(names, [&all_in_set, &set, &suffix](const std::string& n) {
all_in_set = set.find(n + suffix) != set.end();
return !all_in_set;
});
return all_in_set;
} }
static std::shared_ptr<OperatorBase> NOP() { static std::shared_ptr<OperatorBase> NOP() {
...@@ -39,7 +49,7 @@ static std::shared_ptr<OperatorBase> NOP() { ...@@ -39,7 +49,7 @@ static std::shared_ptr<OperatorBase> NOP() {
return net_op; return net_op;
} }
// Get backward operator from a forward operator, recursively implementation. // Get backward operator from a forward operator, a recursive implementation.
// //
// no_grad_names the gradient variable names without gradient calculating. // no_grad_names the gradient variable names without gradient calculating.
// //
...@@ -47,31 +57,35 @@ static std::shared_ptr<OperatorBase> NOP() { ...@@ -47,31 +57,35 @@ static std::shared_ptr<OperatorBase> NOP() {
// BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and // BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and
// pass `uniq_id` through recursive calling. // pass `uniq_id` through recursive calling.
// //
// returns The backward operator. For simple situation, it is a simple // returns The backward operator. In a simple situation, it may be a simple
// operator. For complex situation, it is a NetOp. // operator, in a complex situation, it maybe a NetOp.
// //
// See Backward.h for details // See Backward.h for details
static std::shared_ptr<OperatorBase> BackwardRecursive( static std::shared_ptr<OperatorBase> BackwardRecursive(
const OperatorBase& forwardOp, const OperatorBase& forwardOp,
std::unordered_set<std::string>& no_grad_names, size_t& uniq_id); std::unordered_set<std::string>& no_grad_names, size_t& uniq_id);
std::shared_ptr<OperatorBase> BackwardRecursive( std::shared_ptr<OperatorBase> BackwardRecursive(
const OperatorBase& forwardOp, const OperatorBase& forwardOp,
std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) { std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) {
// If all input gradients of forwarding operator do not need to calculate, // If all input gradients of forwarding operator do not need to calculate,
// just return an NOP. Not return null ptr because NOP does not take // just return an NOP. Not return null ptr because NOP does not take
// too much time for calculation, but it is useful for simplifying logic. // much time for calculation, but it is useful for simplifying logic.
if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) { if (AllInSet(forwardOp.inputs_ /*names*/, kGradVarSuffix /*suffix*/,
no_grad_names /*set*/)) {
return NOP(); return NOP();
} }
// All output gradients of forwarding operator do not need to calculate. // All output gradients of forwarding operator do not need to calculate.
// Then all input gradients cannot be computed at all, and we put them into // Then all input gradients cannot be computed at all, and we put them into
// `no_grad_names` set. Return an NOP. // `no_grad_names` set. Return an NOP.
if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) { if (AllInSet(forwardOp.outputs_ /*names*/, kGradVarSuffix /*suffix*/,
for (auto& name : forwardOp.inputs_) { no_grad_names /*set*/)) {
// Mark all input is not need ForEachVarName(forwardOp.inputs_,
no_grad_names.insert(name + kGradVarSuffix); [&no_grad_names](const std::string& name) -> bool {
} no_grad_names.insert(GradVarName(name));
return false;
});
return NOP(); return NOP();
} }
...@@ -83,55 +97,65 @@ std::shared_ptr<OperatorBase> BackwardRecursive( ...@@ -83,55 +97,65 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp); auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp);
// Map from output gradient variable name to operator's indices in // Map from output gradient variable name to operator's indices in
// backward net. That operator generates that variable. // backward net's ops_. That operator generates that variable.
std::unordered_map<std::string, std::vector<size_t>> dup_output_ops; std::unordered_map<std::string, std::vector<size_t>> dup_output_ops;
size_t local_op_id = 0; size_t local_op_id = 0;
// reversely travel forwardNet // reversely travel forwardNet and collect all duplicate outputs.
for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend();
++it, ++local_op_id) { ++it, ++local_op_id) {
auto fwd = *it; auto fwd = *it;
auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id); auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id);
net->AddOp(bwd); net->AddOp(bwd);
for (auto& out : bwd->outputs_) { ForEachVarName(bwd->outputs_,
dup_output_ops[out].emplace_back(local_op_id); [&dup_output_ops, local_op_id](const std::string& out) {
} dup_output_ops[out].emplace_back(local_op_id);
return false;
});
} }
// Get unique ID for this method. // Get unique ID for this method.
auto uid = uniq_id++; auto uid = uniq_id++;
// TODO(dzh): more comment // TODO(dzh): more comment
// multiple operators which have the same output (y for example) may
// overwrite the same y variable during the backward pass; special handling is
// taken to deal with this case. For each duplicate output, rename it to an
// alias (the original name with an offset), append an `add` op for its
// operator, and finally sum all the alias variables into the final output
// variable y.
using Pos = std::pair<size_t, std::shared_ptr<OperatorBase>>; using Pos = std::pair<size_t, std::shared_ptr<OperatorBase>>;
std::list<Pos> insert_position; std::list<Pos> insert_position;
for (auto& dup_output_op : dup_output_ops) { for (auto& dup_output_op : dup_output_ops) {
const std::string& name = dup_output_op.first; const std::string& name = dup_output_op.first;
auto& dup_op = dup_output_op.second; auto& dup_op = dup_output_op.second;
// no duplicate output
if (dup_op.size() == 1) continue; if (dup_op.size() == 1) continue;
std::vector<std::string> dup_outputs;
// process the duplicate outputs
std::vector<std::string> dup_outputs;
for (size_t i = 0; i < dup_op.size(); ++i) { for (size_t i = 0; i < dup_op.size(); ++i) {
// rename each duplicate output to an alias
auto op_offset = dup_op[i]; auto op_offset = dup_op[i];
dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" +
std::to_string(i)); std::to_string(i));
net->ops_[op_offset]->Rename(name, dup_outputs.back()); net->ops_[op_offset]->Rename(name, dup_outputs.back());
} }
// collect all the offset to append `add` op for each alias
insert_position.push_back( insert_position.push_back(
{dup_op.back(), {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}},
OpRegistry::CreateOp( {{"Out", {name}}}, {})});
"add", {dup_outputs}, {name},
{{"input_format",
std::vector<int>{0, static_cast<int>(dup_outputs.size())}}})});
} }
// make sure the inserted `add` ops follow the BFS order.
insert_position.sort( insert_position.sort(
[](const Pos& l, const Pos& r) { return l.first > r.first; }); [](const Pos& l, const Pos& r) { return l.first > r.first; });
for (auto& pos : insert_position) { for (auto& pos : insert_position) {
net->InsertOp(pos.first + 1, pos.second); net->InsertOp(pos.first + 1, pos.second);
} }
} else { } else {
std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp); std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
for (std::string& grad_input : grad_op->inputs_) {
ForEachVarName(grad_op->inputs_, [&no_grad_names,
&net](std::string& grad_input) {
if (no_grad_names.count(grad_input)) { if (no_grad_names.count(grad_input)) {
// +1 for \0 // +1 for \0
std::string prefix = grad_input.substr( std::string prefix = grad_input.substr(
...@@ -140,16 +164,19 @@ std::shared_ptr<OperatorBase> BackwardRecursive( ...@@ -140,16 +164,19 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
// If part of input gradient of that operator is not calculated, fill // If part of input gradient of that operator is not calculated, fill
// zero variables to that input gradient. // zero variables to that input gradient.
net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix}, net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {{"Src", {prefix}}},
{grad_input}, {})); {{"Dst", {grad_input}}}, {}));
} }
} return false;
});
for (std::string& grad_output : grad_op->outputs_) { ForEachVarName(grad_op->outputs_,
if (no_grad_names.count(grad_output)) { [&no_grad_names](std::string& grad_output) {
grad_output = kEmptyVarName; if (no_grad_names.count(grad_output)) {
} grad_output = kEmptyVarName;
} }
return false;
});
if (net->ops_.empty()) { // Current no aux op is added to network if (net->ops_.empty()) { // Current no aux op is added to network
return grad_op; return grad_op;
...@@ -159,7 +186,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive( ...@@ -159,7 +186,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
net->type_ = "@GENERATED_BACKWARD@"; net->type_ = "@GENERATED_BACKWARD@";
net->CompleteAddOp(); net->CompleteAddOp();
return net; return net;
} } // namespace framework
// See header for comments // See header for comments
std::shared_ptr<OperatorBase> Backward( std::shared_ptr<OperatorBase> Backward(
......
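For orientation, here is a minimal sketch of how the reworked Backward is driven, mirroring the tests that follow; the operator and variable names are illustrative. Inputs and outputs are now passed as maps from a parameter name to a list of argument names, and the second argument of Backward lists forward variables whose gradients should be skipped:

    #include "paddle/framework/backward.h"
    #include "paddle/framework/op_registry.h"

    namespace f = paddle::framework;

    // Sketch only: assumes "rowwise_add" is registered as in the tests below.
    void BackwardSketch() {
      auto fwd = f::OpRegistry::CreateOp(
          "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
      // Skip b's gradient: its output slot in the grad op becomes kEmptyVarName.
      auto bwd = f::Backward(*fwd, /*no_grad_vars=*/{"b"});
    }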
...@@ -30,8 +30,7 @@ using DeviceContext = platform::DeviceContext; ...@@ -30,8 +30,7 @@ using DeviceContext = platform::DeviceContext;
class EmptyOp : public OperatorBase { class EmptyOp : public OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(EmptyOp, OperatorBase) using OperatorBase::OperatorBase;
void InferShape(const Scope &scope) const override {} void InferShape(const Scope &scope) const override {}
void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {} void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {}
}; };
...@@ -40,9 +39,9 @@ class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { ...@@ -40,9 +39,9 @@ class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
public: public:
RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input X of Add").IgnoreGradient(); AddInput("X", "Input X of Add").AsNoGradient();
AddInput("b", "Bias of Add").IgnoreGradient(); AddInput("b", "Bias of Add").AsNoGradient();
AddOutput("Out", "Out of Add").IgnoreGradient(); AddOutput("Out", "Out of Add").AsNoGradient();
AddComment("Add Op"); AddComment("Add Op");
} }
}; };
...@@ -51,8 +50,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker { ...@@ -51,8 +50,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker {
public: public:
MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) MulOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("A", "A"); AddInput("X", "A");
AddInput("B", "B"); AddInput("Y", "B");
AddOutput("Out", "Out"); AddOutput("Out", "Out");
AddComment("Mul"); AddComment("Mul");
} }
...@@ -63,7 +62,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker { ...@@ -63,7 +62,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "X"); AddInput("X", "X");
AddOutput("Y", "Y"); AddOutput("Out", "Y");
AddComment("Sigmoid"); AddComment("Sigmoid");
} }
}; };
...@@ -73,21 +72,25 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker { ...@@ -73,21 +72,25 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker) NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "X input"); AddInput("X", "X input");
AddOutput("Y", "Y output"); AddOutput("Out", "Y output");
AddComment("NoGradOp, same input output. no Grad"); AddComment("NoGradOp, same input output. no Grad");
} }
}; };
class FcOp : public operators::NetOp { class FcOp : public operators::NetOp {
public: public:
void Init() override { FcOp(const std::string &type, const VarNameMap &inputs,
AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, const VarNameMap &outputs, const AttributeMap &attrs)
{Output("mul_result")}, {})); : NetOp(type, inputs, outputs, attrs) {
auto b_name = Input("b"); AddOp(OpRegistry::CreateOp("mul",
{{"X", {Input("X")}}, {"Y", {Input("W")}}},
{{"Out", {Output("mul_result")}}}, {}));
auto input_b = Inputs("b");
std::string before_act = "mul_result"; std::string before_act = "mul_result";
if (b_name != kEmptyVarName) { if (input_b.size() != 0) {
AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_result"), b_name}, AddOp(OpRegistry::CreateOp(
{Output("add_result")}, {})); "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {input_b[0]}}},
{{"Out", {Output("add_result")}}}, {}));
before_act = "add_result"; before_act = "add_result";
} else { } else {
auto out_varname = Output("add_result"); auto out_varname = Output("add_result");
...@@ -96,8 +99,8 @@ class FcOp : public operators::NetOp { ...@@ -96,8 +99,8 @@ class FcOp : public operators::NetOp {
} }
} }
AddOp(OpRegistry::CreateOp("sigmoid", {Output(before_act)}, {Output("Out")}, AddOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}},
{})); {{"Out", {Output("Out")}}}, {}));
CompleteAddOp(false); CompleteAddOp(false);
} }
}; };
...@@ -109,8 +112,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker { ...@@ -109,8 +112,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker {
AddInput("X", "x"); AddInput("X", "x");
AddInput("W", "w"); AddInput("W", "w");
AddInput("b", "b"); AddInput("b", "b");
AddOutput("mul_result", "").SetTemporary(); AddOutput("mul_result", "").AsIntermediate();
AddOutput("add_result", "").SetTemporary(); AddOutput("add_result", "").AsIntermediate();
AddOutput("Out", ""); AddOutput("Out", "");
AddComment(""); AddComment("");
} }
...@@ -141,7 +144,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker { ...@@ -141,7 +144,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
public: public:
AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) AddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "x").SetMultiple(); AddInput("X", "x").AsDuplicable();
AddOutput("Y", "y"); AddOutput("Y", "y");
AddComment(""); AddComment("");
} }
...@@ -164,27 +167,24 @@ REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker, ...@@ -164,27 +167,24 @@ REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker,
many_output_op_grad, f::EmptyOp); many_output_op_grad, f::EmptyOp);
TEST(Backward, simple_op_grad) { TEST(Backward, simple_op_grad) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
"rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
auto gop = f::OpRegistry::CreateGradOp(*fwd); auto gop = f::OpRegistry::CreateGradOp(*fwd);
ASSERT_EQ(4UL, gop->inputs_.size()); ASSERT_EQ(1UL, gop->inputs_.size());
ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
ASSERT_EQ("rowwise_add_grad", gop->type_); ASSERT_EQ("rowwise_add_grad", gop->type_);
ASSERT_EQ(f::GradVarName("X"), gop->outputs_[0]); ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X")));
ASSERT_EQ(f::GradVarName("b"), gop->outputs_[1]); ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b")));
ASSERT_EQ(f::GradVarName("X"), gop->Output(f::GradVarName("X")));
} }
TEST(Backward, simple_op_not_need_grad) { TEST(Backward, simple_op_not_need_grad) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
"rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
auto gop = f::Backward(*fwd, {"X"}); auto gop = f::Backward(*fwd, {"x"});
ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(), ASSERT_EQ(gop->Output(f::GradVarName("X")), f::kEmptyVarName);
f::GradVarName("X")),
gop->outputs_.end());
auto no_input_gop = f::Backward(*fwd, {"X", "b"}); auto no_input_gop = f::Backward(*fwd, {"x", "b"});
ASSERT_NE(no_input_gop, nullptr); ASSERT_NE(no_input_gop, nullptr);
ASSERT_TRUE(no_input_gop->IsNetOp()); ASSERT_TRUE(no_input_gop->IsNetOp());
ASSERT_EQ(0UL, ASSERT_EQ(0UL,
...@@ -192,8 +192,12 @@ TEST(Backward, simple_op_not_need_grad) { ...@@ -192,8 +192,12 @@ TEST(Backward, simple_op_not_need_grad) {
} }
TEST(Backward, net_fc_backward_normal) { TEST(Backward, net_fc_backward_normal) {
std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp( std::shared_ptr<f::OperatorBase> fwd =
"fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {}); f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}},
{{"mul_result", {"mul_res"}},
{"add_result", {"add_re"}},
{"Out", {"out"}}},
{});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {}); std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
ASSERT_TRUE(gop->IsNetOp()); ASSERT_TRUE(gop->IsNetOp());
...@@ -215,8 +219,11 @@ TEST(Backward, net_fc_backward_normal) { ...@@ -215,8 +219,11 @@ TEST(Backward, net_fc_backward_normal) {
TEST(Backward, net_fc_backward_not_have_b) { TEST(Backward, net_fc_backward_not_have_b) {
std::shared_ptr<f::OperatorBase> fwd = std::shared_ptr<f::OperatorBase> fwd =
f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName}, f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {}}},
{"mul_result", "add_result", "tmp"}, {}); {{"mul_result", {"mul_res"}},
{"add_result", {"add_res"}},
{"Out", {"tmp"}}},
{});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {}); std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
ASSERT_TRUE(gop->IsNetOp()); ASSERT_TRUE(gop->IsNetOp());
...@@ -235,38 +242,49 @@ TEST(Backward, net_fc_backward_not_have_b) { ...@@ -235,38 +242,49 @@ TEST(Backward, net_fc_backward_not_have_b) {
TEST(Backward, net_input_of_network_not_need_grad) { TEST(Backward, net_input_of_network_not_need_grad) {
ops::NetOp net; ops::NetOp net;
net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, net.AddOp(f::OpRegistry::CreateOp(
{"mul_tmp_0", "add_tmp_0", "hidden0"}, {})); "fc", {{"X", {"x"}}, {"W", {"W1"}}, {"b", {"b1"}}},
net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, {{"mul_result", {"mul_tmp_0"}},
{"mul_tmp_1", "add_tmp_1", "hidden1"}, {})); {"add_result", {"add_tmp_0"}},
{"Out", {"hidden0"}}},
{}));
net.AddOp(f::OpRegistry::CreateOp(
"fc", {{"X", {"hidden0"}}, {"W", {"W2"}}, {"b", {"b2"}}},
{{"mul_result", {"mul_tmp_1"}},
{"add_result", {"add_tmp_1"}},
{"Out", {"hidden1"}}},
{}));
net.CompleteAddOp(); net.CompleteAddOp();
auto bwd = Backward(net, {"X"}); // X@GRAD is not need. auto bwd = Backward(net, {"x"}); // x@GRAD is not need.
ASSERT_TRUE(bwd->IsNetOp()); ASSERT_TRUE(bwd->IsNetOp());
auto bwd_net = static_cast<ops::NetOp *>(bwd.get()); auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
std::unordered_set<std::string> all_output = std::unordered_set<std::string>( auto output_vars = bwd_net->OutputVars(true);
bwd_net->outputs_.begin(), bwd_net->outputs_.end()); std::unordered_set<std::string> all_outputs =
all_output.erase(f::kEmptyVarName); std::unordered_set<std::string>(output_vars.begin(), output_vars.end());
all_outputs.erase(f::kEmptyVarName);
for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
ASSERT_NE(all_output.find(f::GradVarName(out)), all_output.end()); ASSERT_NE(all_outputs.find(f::GradVarName(out)), all_outputs.end());
} }
// Not Generated X // Not Generated X
ASSERT_EQ(all_output.find(f::GradVarName("X")), all_output.end()); ASSERT_EQ(all_outputs.find(f::GradVarName("X")), all_outputs.end());
ASSERT_EQ(2UL, bwd_net->ops_.size()); ASSERT_EQ(2UL, bwd_net->ops_.size());
ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get()); auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
ASSERT_EQ(3UL, first_fc_grad->ops_.size()); ASSERT_EQ(3UL, first_fc_grad->ops_.size());
ASSERT_EQ(f::kEmptyVarName, ASSERT_EQ(f::kEmptyVarName,
first_fc_grad->ops_[2]->Output(f::GradVarName("A"))); first_fc_grad->ops_[2]->Output(f::GradVarName("X")));
} }
TEST(Backward, net_shared_weight) { TEST(Backward, net_shared_weight) {
ops::NetOp net; ops::NetOp net;
net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {})); net.AddOp(f::OpRegistry::CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}},
net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {})); {{"Out", {"out"}}}, {}));
net.AddOp(f::OpRegistry::CreateOp("mul", {{"X", {"out"}}, {"Y", {"w"}}},
{{"Out", {"FinalOut"}}}, {}));
net.CompleteAddOp(); net.CompleteAddOp();
auto bwd = f::Backward(net, {}); auto bwd = f::Backward(net, {});
...@@ -277,31 +295,37 @@ TEST(Backward, net_shared_weight) { ...@@ -277,31 +295,37 @@ TEST(Backward, net_shared_weight) {
} }
TEST(Backward, op_register_grad_not_for_network) { TEST(Backward, op_register_grad_not_for_network) {
auto fwd = f::OpRegistry::CreateOp( auto fwd =
"fc", {"X", "W", "b"}, {"mul_out", "add_out", "out1"}, f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}},
{{"temporary_index", std::vector<int>{0, 1}}}); {{"mul_result", {"mul_out"}},
{"add_result", {"add_out"}},
{"Out", {"out1"}}},
{{"temporary_index", std::vector<int>{0, 1}}});
ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet);
} }
TEST(Backward, op_all_input_are_not_need) { TEST(Backward, op_all_input_are_not_need) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
auto backward = f::Backward(*fwd, {"X", "b"}); "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
auto backward = f::Backward(*fwd, {"x", "b"});
ASSERT_TRUE(backward->IsNetOp()); ASSERT_TRUE(backward->IsNetOp());
auto net = static_cast<ops::NetOp *>(backward.get()); auto net = static_cast<ops::NetOp *>(backward.get());
ASSERT_TRUE(net->ops_.empty()); ASSERT_TRUE(net->ops_.empty());
} }
TEST(Backward, op_all_output_are_not_need) { TEST(Backward, op_all_output_are_not_need) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
auto backward = f::Backward(*fwd, {"Out"}); "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
auto backward = f::Backward(*fwd, {"out"});
ASSERT_TRUE(backward->IsNetOp()); ASSERT_TRUE(backward->IsNetOp());
auto net = static_cast<ops::NetOp *>(backward.get()); auto net = static_cast<ops::NetOp *>(backward.get());
ASSERT_TRUE(net->ops_.empty()); ASSERT_TRUE(net->ops_.empty());
} }
TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_output_are_not_need) {
auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {}); auto fwd = f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}},
{{"y", {"Y"}}, {"z", {"Z"}}}, {});
auto backward = f::Backward(*fwd, {"Z"}); auto backward = f::Backward(*fwd, {"Z"});
ASSERT_TRUE(backward->IsNetOp()); ASSERT_TRUE(backward->IsNetOp());
auto net = static_cast<ops::NetOp *>(backward.get()); auto net = static_cast<ops::NetOp *>(backward.get());
...@@ -309,10 +333,10 @@ TEST(Backward, op_part_of_output_are_not_need) { ...@@ -309,10 +333,10 @@ TEST(Backward, op_part_of_output_are_not_need) {
auto &fill_zero = *net->ops_[0]; auto &fill_zero = *net->ops_[0];
ASSERT_EQ("fill_zeros_like", fill_zero.type_); ASSERT_EQ("fill_zeros_like", fill_zero.type_);
ASSERT_EQ(1UL, fill_zero.inputs_.size()); ASSERT_EQ(1UL, fill_zero.Inputs("Src").size());
ASSERT_EQ("Z", fill_zero.inputs_[0]); ASSERT_EQ("Z", fill_zero.Input("Src"));
ASSERT_EQ(1UL, fill_zero.outputs_.size()); ASSERT_EQ(1UL, fill_zero.Outputs("Dst").size());
ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.outputs_[0]); ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Dst"));
auto &d_many_out = *net->ops_[1]; auto &d_many_out = *net->ops_[1];
ASSERT_EQ("many_output_op_grad", d_many_out.type_); ASSERT_EQ("many_output_op_grad", d_many_out.type_);
...@@ -324,44 +348,62 @@ TEST(Backward, op_part_of_output_are_not_need) { ...@@ -324,44 +348,62 @@ TEST(Backward, op_part_of_output_are_not_need) {
} }
TEST(Backward, op_part_of_input_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) {
auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto fwd = f::OpRegistry::CreateOp("mul", {{"X", {"a"}}, {"Y", {"b"}}},
{{"Out", {"out"}}}, {});
auto backward = f::Backward(*fwd, {"a"}); auto backward = f::Backward(*fwd, {"a"});
auto &grad_mul = *backward; auto &grad_mul = *backward;
ASSERT_EQ(grad_mul.type_, "mul_grad"); ASSERT_EQ(grad_mul.type_, "mul_grad");
ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
ASSERT_EQ(grad_mul.outputs_.size(), 2UL); ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
ASSERT_EQ(grad_mul.Output(f::GradVarName("A")), f::kEmptyVarName); ASSERT_EQ(grad_mul.Output(f::GradVarName("X")), f::kEmptyVarName);
ASSERT_EQ(grad_mul.Output(f::GradVarName("B")), f::GradVarName("b")); ASSERT_EQ(grad_mul.Output(f::GradVarName("Y")), f::GradVarName("b"));
ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out")); ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out"));
ASSERT_EQ(grad_mul.Input("A"), "a"); ASSERT_EQ(grad_mul.Input("X"), "a");
ASSERT_EQ(grad_mul.Input("B"), "b"); ASSERT_EQ(grad_mul.Input("Y"), "b");
ASSERT_EQ(grad_mul.Input("Out"), "out"); ASSERT_EQ(grad_mul.Input("Out"), "out");
} }
TEST(Backward, linear_net_intermediate_variable_has_no_grad) { TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
ops::NetOp net; ops::NetOp net;
net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, net.AddOp(f::OpRegistry::CreateOp(
{"mul_out1", "add_out1", "out1"}, {})); "fc", {{"X", {"x1"}}, {"W", {"w1"}}, {"b", {"b1"}}},
net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, {{"mul_result", {"mul_out1"}},
{"mul_out2", "tmp_out2", "out2"}, {})); {"add_result", {"add_out1"}},
net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"Out", {"out1"}}},
{"mul_out3", "tmp_out3", "out3"}, {})); {}));
net.AddOp(f::OpRegistry::CreateOp(
"fc", {{"X", {"out1"}}, {"W", {"w2"}}, {"b", {"b2"}}},
{{"mul_result", {"mul_out2"}},
{"add_result", {"tmp_out2"}},
{"Out", {"out2"}}},
{}));
net.AddOp(f::OpRegistry::CreateOp(
"fc", {{"X", {"out2"}}, {"W", {"w3"}}, {"b", {"b3"}}},
{{"mul_result", {"mul_out3"}},
{"add_result", {"tmp_out3"}},
{"Out", {"out3"}}},
{}));
net.CompleteAddOp(); net.CompleteAddOp();
auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"});
ASSERT_TRUE(backward->IsNetOp()); ASSERT_TRUE(backward->IsNetOp());
auto bwd_net = static_cast<ops::NetOp *>(backward.get()); auto bwd_net = static_cast<ops::NetOp *>(backward.get());
ASSERT_EQ(bwd_net->ops_.size(), 3UL); ASSERT_EQ(bwd_net->ops_.size(), 3UL);
auto &grad_fc = *bwd_net->ops_[0]; auto &grad_fc = *bwd_net->ops_[0];
-  EXPECT_EQ(grad_fc.inputs_.size(),
-            3UL /* external input number */
-                + 1UL /* external output number*/
-                + 1UL /* number of gradient of external output*/
-                + 2U /* internal variable number*/);
-  EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/
-                                         + 2UL /* input number of rowwise_add */
-                                         + 1UL /* input number of sigmod */);
-  EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
+  const char *all = paddle::operators::NetOp::kAll;
+  EXPECT_EQ(grad_fc.inputs_[all].size(),
+            2UL /* external input number */
+                + 1UL /* external output number*/
+                + 1UL /* number of gradient of external output*/
+                + 2U /* internal variable number*/);
+  EXPECT_EQ(grad_fc.outputs_[all].size(),
+            2UL /* input number of mul*/
+                + 2UL /* input number of rowwise_add */
+                + 1UL /* input number of sigmod */);
+  EXPECT_EQ(bwd_net->ops_[1]->inputs_[all].size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[1]->outputs_[all].size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[2]->inputs_[all].size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[2]->outputs_[all].size(), 0UL);
} }
...@@ -283,6 +283,5 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { ...@@ -283,6 +283,5 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
DDim::DDim(std::initializer_list<int> init_list) { DDim::DDim(std::initializer_list<int> init_list) {
*this = make_ddim(init_list); *this = make_ddim(init_list);
} }
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_tensor.h"
#include <memory>
namespace paddle {
namespace framework {
namespace details {
using LOD = LODTensor::LOD;
std::shared_ptr<LOD> SliceLOD(const LOD &lod, size_t level_begin,
size_t level_end) {
auto new_lod = std::make_shared<LOD>();
new_lod->reserve(level_end - level_begin);
for (size_t i = level_begin; i < level_end; i++) {
new_lod->emplace_back(lod[i]);
}
return new_lod;
}
std::shared_ptr<LOD> SliceLOD(const LOD &lod, size_t level, size_t elem_begin,
size_t elem_end, bool tensor_shared) {
// slice the lod.
auto new_lod = std::make_shared<LOD>();
new_lod->reserve(lod.size() - level);
auto start = lod.at(level)[elem_begin];
auto end = lod.at(level)[elem_end];
for (auto it = lod.begin() + level; it != lod.end(); it++) {
auto it_begin = std::find(it->begin(), it->end(), start);
auto it_end = std::find(it_begin, it->end(), end);
PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
new_lod->emplace_back(it_begin, it_end + 1);
if (!tensor_shared) {
// reset offset if tensor is copyed and sliced.
std::transform(new_lod->back().begin(), new_lod->back().end(),
new_lod->back().begin(),
[start](int v) { return v - start; });
PADDLE_ENFORCE(new_lod->back().front() == 0, "error in slice LOD");
}
}
return new_lod;
}
} // namespace details
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
namespace paddle {
namespace framework {
namespace details {
/*
* Slice levels from LOD.
*
* @lod: LOD to slice.
* @level_begin: level to begin slice.
* @level_end: level to end slice.
*/
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD &lod,
size_t level_begin, size_t level_end);
/*
* Slice elements from a level of LOD.
*
* @lod: LOD to slice.
* @level: which level to slice.
* @elem_begin: element's index to begin slice.
* @elem_end: element's index to end slice.
*/
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD &lod,
size_t level, size_t elem_begin,
size_t elem_end, bool tensor_shared);
} // namespace details
} // namespace framework
} // namespace paddle
...@@ -15,9 +15,6 @@ limitations under the License. */ ...@@ -15,9 +15,6 @@ limitations under the License. */
syntax = "proto2"; syntax = "proto2";
package paddle.framework; package paddle.framework;
// Attribute Type for paddle's Op.
// Op contains many attributes. Each type of attributes could be different.
// The AttrType will be shared between AttrDesc and AttrProto.
enum AttrType { enum AttrType {
INT = 0; INT = 0;
FLOAT = 1; FLOAT = 1;
...@@ -25,4 +22,61 @@ enum AttrType { ...@@ -25,4 +22,61 @@ enum AttrType {
INTS = 3; INTS = 3;
FLOATS = 4; FLOATS = 4;
STRINGS = 5; STRINGS = 5;
} }
\ No newline at end of file
// OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type.
message OpDesc {
message Attr {
required string name = 1;
required AttrType type = 2;
optional int32 i = 3;
optional float f = 4;
optional string s = 5;
repeated int32 ints = 6;
repeated float floats = 7;
repeated string strings = 8;
};
message Var {
required string parameter = 1;
repeated string arguments = 2;
};
required string type = 3;
repeated Var inputs = 1;
repeated Var outputs = 2;
repeated Attr attrs = 4;
};
// OpProto describes a C++ framework::OperatorBase derived class.
message OpProto {
// VarProto describes the C++ type framework::Variable.
message Var {
required string name = 1;
required string comment = 2;
optional bool duplicable = 3 [ default = false ];
optional bool intermediate = 4 [ default = false ];
optional bool no_gradient = 5 [ default = false ];
}
// AttrProto describes the C++ type Attribute.
message Attr {
required string name = 1;
required AttrType type = 2;
required string comment = 3;
// If that attribute is generated, it means the Paddle third
// language binding has responsibility to fill that
// attribute. End-User should not set that attribute.
optional bool generated = 4 [ default = false ];
}
required string type = 1;
repeated Var inputs = 2;
repeated Var outputs = 3;
repeated Attr attrs = 4;
required string comment = 5;
}
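To make the consolidated message concrete, the sketch below fills in an OpDesc through the generated protobuf API; the operator, its variable names, and the scale attribute are invented for illustration and are not part of this commit:

    #include "paddle/framework/framework.pb.h"

    // Sketch only: describe one hypothetical operator instance as an OpDesc.
    paddle::framework::OpDesc MakeExampleOpDesc() {
      paddle::framework::OpDesc desc;
      desc.set_type("rowwise_add");

      auto* x = desc.add_inputs();  // OpDesc::Var
      x->set_parameter("X");
      x->add_arguments("x");

      auto* out = desc.add_outputs();
      out->set_parameter("Out");
      out->add_arguments("out");

      auto* attr = desc.add_attrs();  // OpDesc::Attr
      attr->set_name("scale");
      attr->set_type(paddle::framework::AttrType::FLOAT);
      attr->set_f(1.0f);
      return desc;
    }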
...@@ -13,63 +13,28 @@ express or implied. See the License for the specific language governing ...@@ -13,63 +13,28 @@ express or implied. See the License for the specific language governing
permissions and limitations under the License. */ permissions and limitations under the License. */
#include "paddle/framework/grad_op_builder.h" #include "paddle/framework/grad_op_builder.h"
#include "paddle/framework/op_proto.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
typedef std::vector<int> Ints;
enum class OpArgType { IN, OUT }; enum class OpArgType { IN, OUT };
const Ints* AttrFormat(const AttributeMap& attrs, const std::string& key) { static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type,
return (attrs.count(key) > 0) ? &boost::get<Ints>(attrs.at(key)) : nullptr; bool is_grad, OperatorBase::VarNameMap* vars) {
} const auto& src_inout =
src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_;
Ints* AttrFormat(AttributeMap& attrs, const std::string& key) { auto& dst_inout = *vars;
return (attrs.count(key) > 0) ? &boost::get<Ints>(attrs.at(key)) : nullptr;
}
static void TransOpArg(const OperatorBase* src_op,
std::vector<std::string>& grad_inputs,
std::vector<std::string>& grad_outputs,
AttributeMap& grad_attrs,
std::unordered_map<std::string, int>& grad_idxs,
const std::string& src_type, const std::string& dst_type,
int& idx, bool is_grad) {
const std::vector<std::string>& src_inout =
(src_type == "input_format") ? src_op->inputs_ : src_op->outputs_;
const std::vector<int>* src_format = AttrFormat(src_op->Attrs(), src_type);
std::vector<std::string>& dst_inout =
(dst_type == "input_format") ? grad_inputs : grad_outputs;
std::vector<int>* dst_format = AttrFormat(grad_attrs, dst_type);
const OpProto& proto = *(OpRegistry::op_info_map().at(src_op->type_).proto_);
const auto& src_arg_list = const auto& src_arg_list =
(src_type == "input_format") ? proto.inputs() : proto.outputs(); src_type == OpArgType::IN ? proto.inputs() : proto.outputs();
for (const auto& arg : src_arg_list) { for (const auto& arg : src_arg_list) {
std::string src_name = arg.name(); if (arg.no_gradient() && !is_grad) continue;
std::string dst_name = is_grad ? src_name + kGradVarSuffix : src_name; const std::string src_name = arg.name();
grad_idxs[dst_name] = idx++; std::string dst_name = is_grad ? GradVarName(src_name) : src_name;
int src_arg_idx = src_op->in_out_idxs_->at(src_name); dst_inout[dst_name].reserve(src_inout.at(src_name).size());
int src_begin = for (auto& var_name : src_inout.at(src_name)) {
src_format == nullptr ? src_arg_idx : src_format->at(src_arg_idx); std::string s = is_grad ? GradVarName(var_name) : var_name;
int src_end = src_format == nullptr ? src_arg_idx + 1 dst_inout[dst_name].emplace_back(s);
: src_format->at(src_arg_idx + 1);
for (int i = src_begin; i < src_end; ++i) {
std::string s =
is_grad ? src_inout[i] + kGradVarSuffix
: (arg.ignore_gradient() ? kEmptyVarName : src_inout[i]);
dst_inout.emplace_back(s);
}
if (dst_format != nullptr) {
dst_format->push_back(dst_inout.size());
} }
} }
} }
...@@ -82,44 +47,17 @@ OperatorBase* BuildGradOp(const OperatorBase* op) { ...@@ -82,44 +47,17 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.", PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.",
op->type_); op->type_);
AttributeMap grad_attrs(op->Attrs()); OperatorBase::VarNameMap inputs;
grad_attrs.erase("input_format"); OperatorBase::VarNameMap outputs;
grad_attrs.erase("output_format"); TransOpArg(op, OpArgType::IN, false, &inputs); // I
if (op->Attrs().count("input_format") > 0) { TransOpArg(op, OpArgType::OUT, false, &inputs); // O
grad_attrs["output_format"] = std::vector<int>({0}); TransOpArg(op, OpArgType::OUT, true, &inputs); // OG
} TransOpArg(op, OpArgType::IN, true, &outputs); // IG
if (op->Attrs().count("input_format") > 0 ||
op->Attrs().count("output_format") > 0) {
grad_attrs["input_format"] = std::vector<int>({0});
}
std::vector<std::string> grad_inputs, grad_outputs;
using VarIndexMap = std::unordered_map<std::string, int>;
VarIndexMap* grad_idxs = new VarIndexMap;
int in_idx = 0;
int out_idx = 0;
TransOpArg(op, grad_inputs, grad_outputs, grad_attrs, *grad_idxs,
"input_format", "input_format", in_idx, false); // I
TransOpArg(op, grad_inputs, grad_outputs, grad_attrs, *grad_idxs,
"output_format", "input_format", in_idx, false); // G
TransOpArg(op, grad_inputs, grad_outputs, grad_attrs, *grad_idxs,
"output_format", "input_format", in_idx, true); // OG
TransOpArg(op, grad_inputs, grad_outputs, grad_attrs, *grad_idxs,
"input_format", "output_format", out_idx, true); // IG
it = OpRegistry::op_info_map().find(grad_op_type); it = OpRegistry::op_info_map().find(grad_op_type);
PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(), PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
"'%s' has not been registered.", grad_op_type); "'%s' has not been registered.", grad_op_type);
OperatorBase* grad_op = it->second.creator_(); return it->second.creator_(grad_op_type, inputs, outputs, op->attrs_);
grad_op->type_ = grad_op_type;
grad_op->inputs_ = grad_inputs;
grad_op->outputs_ = grad_outputs;
grad_op->attrs_ = grad_attrs;
grad_op->in_out_idxs_.reset(grad_idxs);
return grad_op;
} }
} // namespace framework } // namespace framework
......
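As a rough illustration of what the rewritten builder assembles (mirroring the add_two test below; "my_op" and its argument names are hypothetical), the gradient op takes the forward inputs (I), forward outputs (O), and output gradients (OG) as its inputs and emits the input gradients (IG) as its outputs, skipping anything marked AsNoGradient() on the non-gradient passes:

    #include "paddle/framework/op_registry.h"

    // Sketch only: assumes an operator "my_op" registered with proto inputs
    // {X, Y}, output {Out}, and a registered gradient op.
    void GradOpSketch() {
      auto fwd = paddle::framework::OpRegistry::CreateOp(
          "my_op", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {});
      auto grad = paddle::framework::OpRegistry::CreateGradOp(*fwd);
      // grad->inputs_ : X={"x"}, Y={"y"}, Out={"out"}, Out@GRAD={"out@GRAD"}
      // grad->outputs_: X@GRAD={"x@GRAD"}, Y@GRAD={"y@GRAD"}
    }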
...@@ -13,10 +13,10 @@ class MutiInOutOpMaker : public OpProtoAndCheckerMaker { ...@@ -13,10 +13,10 @@ class MutiInOutOpMaker : public OpProtoAndCheckerMaker {
MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("In1", "a single input"); AddInput("In1", "a single input");
AddInput("In2_mult", "a multiple input").SetMultiple(); AddInput("In2_mult", "a multiple input").AsDuplicable();
AddInput("In3", "another single input"); AddInput("In3", "another single input");
AddOutput("Out1", "a single output"); AddOutput("Out1", "a single output");
AddOutput("Out2_mult", "a multiple output").SetMultiple(); AddOutput("Out2_mult", "a multiple output").AsDuplicable();
AddComment("test op with multiple inputs and outputs"); AddComment("test op with multiple inputs and outputs");
} }
}; };
...@@ -26,10 +26,10 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { ...@@ -26,10 +26,10 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker) IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("In1", "a single input"); AddInput("In1", "a single input");
AddInput("In2_mult", "a multiple input").SetMultiple().IgnoreGradient(); AddInput("In2_mult", "a multiple input").AsDuplicable().AsNoGradient();
AddInput("In3_mult", "another multiple input").SetMultiple(); AddInput("In3_mult", "another multiple input").AsDuplicable();
AddOutput("Out1_mult", "a multiple output").SetMultiple(); AddOutput("Out1_mult", "a multiple output").AsDuplicable();
AddOutput("Out2", "a single output").IgnoreGradient(); AddOutput("Out2", "a single output").AsNoGradient();
AddComment("op with inputs and outputs ignored in gradient calculating"); AddComment("op with inputs and outputs ignored in gradient calculating");
} }
}; };
...@@ -40,33 +40,34 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { ...@@ -40,33 +40,34 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
namespace f = paddle::framework; namespace f = paddle::framework;
TEST(GradOpBuilder, AddTwo) { TEST(GradOpBuilder, AddTwo) {
std::shared_ptr<f::OperatorBase> add_op( std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
f::OpRegistry::CreateOp("add_two", {"x", "y"}, {"out"}, {})); "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_add_op = std::shared_ptr<f::OperatorBase> grad_add_op =
f::OpRegistry::CreateGradOp(*add_op); f::OpRegistry::CreateGradOp(*add_op);
EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4); EXPECT_EQ(grad_add_op->inputs_.size(), 4UL);
EXPECT_EQ(static_cast<int>(grad_add_op->outputs_.size()), 2); EXPECT_EQ(grad_add_op->outputs_.size(), 2UL);
EXPECT_EQ(grad_add_op->Input("X"), "x"); EXPECT_EQ(grad_add_op->Input("X"), "x");
EXPECT_EQ(grad_add_op->Input("Y"), "y"); EXPECT_EQ(grad_add_op->Input("Y"), "y");
EXPECT_EQ(grad_add_op->Input("Out"), "out"); EXPECT_EQ(grad_add_op->Input("Out"), "out");
EXPECT_EQ(grad_add_op->Input("Out@GRAD"), "out@GRAD"); EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
EXPECT_EQ(grad_add_op->Output("X@GRAD"), "x@GRAD"); EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD"); EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
} }
REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP);
TEST(GradOpBuilder, MutiInOut) { TEST(GradOpBuilder, MutiInOut) {
f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
{"output_format", std::vector<int>{0, 1, 3}}};
std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp( std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
"mult_io", {"in1", "in2_1", "in2_2", "in2_3", "in3"}, "mult_io",
{"out1", "out2_1", "out2_2"}, attrs)); {{"In1", {"in1"}},
{"In2_mult", {"in2_1", "in2_2", "in2_3"}},
{"In3", {"in3"}}},
{{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_test_op = std::shared_ptr<f::OperatorBase> grad_test_op =
f::OpRegistry::CreateGradOp(*test_op); f::OpRegistry::CreateGradOp(*test_op);
ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL); ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
EXPECT_EQ(grad_test_op->Input("In1"), "in1"); EXPECT_EQ(grad_test_op->Input("In1"), "in1");
EXPECT_EQ(grad_test_op->Inputs("In2_mult"), EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
std::vector<std::string>({"in2_1", "in2_2", "in2_3"})); std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));
...@@ -80,7 +81,7 @@ TEST(GradOpBuilder, MutiInOut) { ...@@ -80,7 +81,7 @@ TEST(GradOpBuilder, MutiInOut) {
std::vector<std::string>( std::vector<std::string>(
{f::GradVarName("out2_1"), f::GradVarName("out2_2")})); {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
ASSERT_EQ(grad_test_op->outputs_.size(), 5UL); ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
std::vector<std::string>({f::GradVarName("in2_1"), std::vector<std::string>({f::GradVarName("in2_1"),
...@@ -90,31 +91,29 @@ TEST(GradOpBuilder, MutiInOut) { ...@@ -90,31 +91,29 @@ TEST(GradOpBuilder, MutiInOut) {
} }
TEST(GradOpBuilder, IOIgnoredInGradient) { TEST(GradOpBuilder, IOIgnoredInGradient) {
f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
{"output_format", std::vector<int>{0, 2, 3}}};
std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp( std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
"io_ignored", {"in1", "in2_1", "in2_2", "in3_1", "in3_2"}, "io_ignored",
{"out1_1", "out1_2", "out2"}, attrs)); {{"In1", {"in1"}},
{"In2_mult", {"in2_1", "in2_2"}},
{"In3_mult", {"in3_1", "in3_2"}}},
{{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_test_op = std::shared_ptr<f::OperatorBase> grad_test_op =
f::OpRegistry::CreateGradOp(*test_op); f::OpRegistry::CreateGradOp(*test_op);
// 'In2' and 'Out2' are ignored in gradient calculating // 'In2' and 'Out2' are ignored in gradient calculating
ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL); ASSERT_EQ(grad_test_op->inputs_.size(), 2UL + 1UL + 2UL);
EXPECT_EQ(grad_test_op->Input("In1"), "in1"); EXPECT_EQ(grad_test_op->Input("In1"), "in1");
EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
std::vector<std::string>({f::kEmptyVarName, f::kEmptyVarName}));
EXPECT_EQ(grad_test_op->Inputs("In3_mult"), EXPECT_EQ(grad_test_op->Inputs("In3_mult"),
std::vector<std::string>({"in3_1", "in3_2"})); std::vector<std::string>({"in3_1", "in3_2"}));
EXPECT_EQ(grad_test_op->Inputs("Out1_mult"), EXPECT_EQ(grad_test_op->Inputs("Out1_mult"),
std::vector<std::string>({"out1_1", "out1_2"})); std::vector<std::string>({"out1_1", "out1_2"}));
EXPECT_EQ(grad_test_op->Input("Out2"), f::kEmptyVarName);
EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")), EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")),
std::vector<std::string>( std::vector<std::string>(
{f::GradVarName("out1_1"), f::GradVarName("out1_2")})); {f::GradVarName("out1_1"), f::GradVarName("out1_2")}));
EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")), EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
f::GradVarName("out2")); f::GradVarName("out2"));
ASSERT_EQ(grad_test_op->outputs_.size(), 5UL); ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
std::vector<std::string>( std::vector<std::string>(
......
...@@ -19,32 +19,59 @@ ...@@ -19,32 +19,59 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
LODTensor LODTensor::SliceShared(size_t level_begin, size_t level_end) const { LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin,
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); size_t level_end) const {
auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end); LOD new_lod;
// slice levels just need to update LOD info, each level will contains the new_lod.reserve(level_end - level_begin);
// whole tensor_, so no need to modify tensor_. for (size_t i = level_begin; i < level_end; i++) {
return LODTensor(tensor_, new_lod); new_lod.emplace_back(at(i));
}
return new_lod;
} }
LODTensor LODTensor::SliceShared(size_t level, size_t elem_begin, LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
size_t elem_end) const { size_t elem_end) const {
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); // slice the lod.
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, LOD new_lod;
NumLevels()); new_lod.reserve(size() - level);
PADDLE_ENFORCE(elem_begin < NumElements(level), auto start = this->at(level)[elem_begin];
"element begin [%d] out of range [%d]", elem_begin, auto end = this->at(level)[elem_end];
NumElements(level));
PADDLE_ENFORCE(elem_end < NumElements(level) + 1, for (auto it = this->begin() + level; it != this->end(); it++) {
"element end [%d] out of range [%d]", elem_end, auto it_begin = std::find(it->begin(), it->end(), start);
NumElements(level)); auto it_end = std::find(it_begin, it->end(), end);
PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end, PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
true /*tensor_shared*/); new_lod.emplace_back(it_begin, it_end + 1);
// reset offset if tensor is copyed and sliced.
// slice elements just need to update LOD info, because offsets are not std::transform(new_lod.back().begin(), new_lod.back().end(),
// changed, so the original tensor_ can be reused. new_lod.back().begin(),
return LODTensor(tensor_, new_lod); [start](int v) { return v - start; });
PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD");
}
PADDLE_ENFORCE_LE(new_lod.size(), this->size());
return new_lod;
}
bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
if (a.size() != b.size()) {
return false;
}
for (size_t i = 0; i < a.size(); i++) {
const auto& a_level = a[i];
const auto& b_level = b[i];
if (a_level.size() != b_level.size()) {
return false;
}
for (size_t j = 0; j < a_level.size(); j++) {
if (a_level[j] != b_level[j]) {
return false;
}
}
}
return true;
} }
} // namespace framework } // namespace framework
......
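To make the offset arithmetic in LOD::SliceInLevel above concrete, here is a minimal self-contained sketch (not part of the diff; the helper name SliceInLevelDemo is invented) that mirrors the same logic on plain std::vector and walks through the three-level LOD used by lod_tensor_test.cc further below.

// Illustrative sketch only -- mirrors LOD::SliceInLevel above on plain vectors.
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

using Level = std::vector<size_t>;
using LOD = std::vector<Level>;

LOD SliceInLevelDemo(const LOD& lod, size_t level, size_t elem_begin,
                     size_t elem_end) {
  LOD new_lod;
  new_lod.reserve(lod.size() - level);
  const size_t start = lod[level][elem_begin];
  const size_t end = lod[level][elem_end];
  for (size_t i = level; i < lod.size(); ++i) {
    auto it_begin = std::find(lod[i].begin(), lod[i].end(), start);
    auto it_end = std::find(it_begin, lod[i].end(), end);
    assert(it_begin != lod[i].end() && it_end != lod[i].end());
    new_lod.emplace_back(it_begin, it_end + 1);
    // Re-base the copied offsets so every sliced level starts at 0.
    for (auto& v : new_lod.back()) v -= start;
  }
  return new_lod;
}

int main() {
  // The LOD built by LODTensorTester below: 2 / 4 / 8 elements per level.
  LOD lod = {{0, 10, 20},
             {0, 5, 10, 15, 20},
             {0, 2, 5, 7, 10, 12, 15, 17, 20}};
  // Slice elements [0, 2) of level 1, i.e. rows [0, 10) of the tensor.
  LOD sliced = SliceInLevelDemo(lod, 1, 0, 2);
  // Prints "0 5 10" and "0 2 5 7 10": 2 elements in level 0 and 4 in level 1,
  // matching the SliceInLevel expectations in lod_tensor_test.cc.
  for (const auto& lvl : sliced) {
    for (size_t v : lvl) std::printf("%zu ", v);
    std::printf("\n");
  }
  return 0;
}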
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#pragma once #pragma once
#include <memory> #include <memory>
#if (!PADDLE_ONLY_CPU) #if !defined(PADDLE_ONLY_CPU)
#include <thrust/device_vector.h> #include <thrust/device_vector.h>
#include <thrust/host_vector.h> #include <thrust/host_vector.h>
#endif #endif
...@@ -31,30 +31,29 @@ namespace framework { ...@@ -31,30 +31,29 @@ namespace framework {
* LODTensor (Level of details Tensor) * LODTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference. * see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/ */
class LODTensor { class LODTensor : public Tensor {
public: public:
// A Level saves the offsets of each unit. // A Level saves the offsets of each unit.
#ifdef PADDLE_ONLY_CPU #ifdef PADDLE_ONLY_CPU
using Level = std::vector<size_t>; template <typename T>
using Vector = std::vector<T>;
#else #else
using Level = thrust::device_vector<size_t>; template <typename T>
using Vector = thrust::host_vector<T>;
#endif #endif
// LOD stores offsets of each level of units, the largest units level first, // LoD stores offsets of each level of units, the largest units level first,
// then the smaller units level. Each Level stores the offsets of units in // then the smaller units level. Each Level stores the offsets of units in
// Tensor. // Tensor.
typedef std::vector<Level> LOD; class LOD : public std::vector<Vector<size_t>> {
public:
LOD SliceLevels(size_t level_begin, size_t level_end) const;
LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
};
LODTensor() {} LODTensor() {}
LODTensor(const std::shared_ptr<Tensor> &tensor, explicit LODTensor(const LOD &lod) : lod_(lod) {}
const std::shared_ptr<LOD> &lod) {
Reset(tensor, lod);
}
void Reset(const std::shared_ptr<Tensor> &tensor, virtual Tensor *Clone() const { return new LODTensor(lod_); }
const std::shared_ptr<LOD> &lod) {
tensor_ = tensor;
lod_start_pos_ = lod;
}
/* /*
* Get an element from the LOD. * Get an element from the LOD.
...@@ -65,16 +64,14 @@ class LODTensor { ...@@ -65,16 +64,14 @@ class LODTensor {
PADDLE_ENFORCE(elem < NumElements(level), PADDLE_ENFORCE(elem < NumElements(level),
"element begin [%d] out of range [%d]", elem, "element begin [%d] out of range [%d]", elem,
NumElements(level)); NumElements(level));
return (*lod_start_pos_)[level][elem]; return (lod_)[level][elem];
} }
/* /*
* Number of LODTensor's levels, each level has units of data, for example, * Number of LODTensor's levels, each level has units of data, for example,
* in the sentence's view, article, paragraph, sentence are 3 levels. * in the sentence's view, article, paragraph, sentence are 3 levels.
*/ */
size_t NumLevels() const { size_t NumLevels() const { return lod_.size(); }
return lod_start_pos_ ? lod_start_pos_->size() : 0UL;
}
/* /*
* Number of elements in a level. * Number of elements in a level.
*/ */
...@@ -82,64 +79,71 @@ class LODTensor { ...@@ -82,64 +79,71 @@ class LODTensor {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels()); NumLevels());
// the last offset is the end of last element // the last offset is the end of last element
return lod_start_pos_->at(level).size() - 1; return lod_[level].size() - 1;
} }
/*
* Slice of levels[level_begin:level_end], with tensor copied.
*/
template <typename T>
LODTensor SliceCopied(size_t level_begin, size_t level_end,
const platform::Place &dst_place) const;
/* /*
* Slice of levels[level_begin:level_end], with tensor shared. * Slice of levels[level_begin:level_end], with tensor shared.
*/ */
LODTensor SliceShared(size_t level_begin, size_t level_end) const;
/*
* Slice of elements of a level, [elem_begin: elem_end], with tensor copied.
* @note: low performance in slice lod_start_pos_.
*/
template <typename T> template <typename T>
LODTensor SliceCopied(size_t level, size_t elem_begin, size_t elem_end, LODTensor SliceLevels(size_t level_begin, size_t level_end) const;
const platform::Place &dst_place) const;
/* /*
* Slice of elements of a level, [elem_begin: elem_end], with tensor shared. * Slice of elements of a level, [elem_begin: elem_end], with tensor shared.
* @note: low performance in slice lod_start_pos_. * @note: low performance in slice lod_.
*/
LODTensor SliceShared(size_t level, size_t elem_begin, size_t elem_end) const;
/*
* Copy other's lod_start_pos_, to share LOD info.
* @note: the LOD info should not be changed.
*/ */
void ShareLOD(const LODTensor &other) { template <typename T>
lod_start_pos_ = other.lod_start_pos_; LODTensor SliceInLevel(size_t level, size_t elem_begin,
} size_t elem_end) const;
/* /*
* Copy other's lod_start_pos_'s content, free to mutate. * Copy other's lod_'s content, free to mutate.
*/ */
void CopyLOD(const LODTensor &other) { void CopyLOD(const LODTensor &other) { lod_ = other.lod_; }
lod_start_pos_ = std::make_shared<LOD>(*other.lod_start_pos_);
}
/* /*
* Determine whether LODTensor has a valid LOD info. * Determine whether LODTensor has a valid LOD info.
*/ */
bool HasLOD() const { return bool(lod_start_pos_); } const LOD &lod() const { return lod_; }
LOD *lod() const { return lod_start_pos_.get(); } LOD *mutable_lod() { return &lod_; }
std::shared_ptr<Tensor> &tensor() { return tensor_; } virtual ~LODTensor() {}
Tensor *raw_tensor() { return tensor_.get(); }
private: private:
std::shared_ptr<LOD> lod_start_pos_; LOD lod_;
std::shared_ptr<Tensor> tensor_;
}; };
bool operator==(const LODTensor::LOD &a, const LODTensor::LOD &b);
template <typename T>
LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const {
auto new_lod = lod_.SliceLevels(level_begin, level_end);
// slice levels just need to update LOD info, each level will contain the
// whole tensor_, so no need to modify tensor_.
LODTensor new_tensor(new_lod);
new_tensor.ShareDataWith<T>(*this);
return new_tensor;
}
template <typename T>
LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin,
size_t elem_end) const {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level),
"element begin [%d] out of range [%d]", elem_begin,
NumElements(level));
PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
"element end [%d] out of range [%d]", elem_end,
NumElements(level));
auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end);
// slice elements just need to update LOD info, because offsets are not
// changed, so the original tensor_ can be reused.
LODTensor new_tensor(new_lod);
new_tensor.ShareDataWith<T>(*this);
return new_tensor;
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
#include "paddle/framework/lod_tensor_impl.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/details/lod_tensor.h"
namespace paddle {
namespace framework {
template <typename T>
LODTensor LODTensor::SliceCopied(size_t level_begin, size_t level_end,
const platform::Place &dst_place) const {
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end);
auto new_tensor = std::make_shared<Tensor>();
new_tensor->CopyFrom<T>(*tensor_, dst_place);
return LODTensor(new_tensor, new_lod);
}
template <typename T>
LODTensor LODTensor::SliceCopied(size_t level, size_t elem_begin,
size_t elem_end,
const platform::Place &dst_place) const {
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level),
"element begin [%d] out of range [%d]", elem_begin,
NumElements(level));
PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
"element end [%d] out of range [%d]", elem_end,
NumElements(level));
auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end,
false /*tensor_shared*/);
auto start_idx = new_lod->front().front();
auto end_idx = new_lod->front().back() - 1 /*the next element's start*/;
auto sliced_tensor = tensor_->Slice<T>(start_idx, end_idx);
auto new_tensor = std::make_shared<Tensor>();
new_tensor->CopyFrom<T>(sliced_tensor, dst_place);
return LODTensor(new_tensor, new_lod);
}
} // namespace framework
} // namespace paddle
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <glog/logging.h> #include <glog/logging.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <algorithm>
#include <memory> #include <memory>
namespace paddle { namespace paddle {
...@@ -29,22 +30,28 @@ class LODTensorTester : public ::testing::Test { ...@@ -29,22 +30,28 @@ class LODTensorTester : public ::testing::Test {
// 0 10 20 // 0 10 20
// 0 5 10 15 20 // 0 5 10 15 20
// 0 2 5 7 10 12 15 20 // 0 2 5 7 10 12 15 20
auto lod = std::make_shared<LODTensor::LOD>(); LODTensor::LOD lod;
lod->push_back(std::vector<size_t>{0, 10, 20}); lod.push_back(std::vector<size_t>{0, 10, 20});
lod->push_back(std::vector<size_t>{0, 5, 10, 15, 20}); lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
lod->push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20}); lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
auto tensor = std::make_shared<Tensor>(); ASSERT_EQ(lod.size(), 3UL);
tensor->Resize({20 /*batch size*/, 128 /*dim*/});
tensor.Resize({20 /*batch size*/, 128 /*dim*/});
// malloc memory // malloc memory
tensor->mutable_data<float>(place); tensor.mutable_data<float>(place);
lod_tensor.reset(new LODTensor(lod));
lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/});
lod_tensor->Reset(tensor, lod); lod_tensor->ShareDataWith<float>(tensor);
// lod_tensor->ShareDataWith<Tensor>(tensor);
} }
protected: protected:
std::unique_ptr<LODTensor> lod_tensor; std::unique_ptr<LODTensor> lod_tensor;
platform::CPUPlace place; platform::CPUPlace place;
Tensor tensor;
}; };
TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); } TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); }
...@@ -55,110 +62,54 @@ TEST_F(LODTensorTester, NumElements) { ...@@ -55,110 +62,54 @@ TEST_F(LODTensorTester, NumElements) {
ASSERT_EQ(lod_tensor->NumElements(2), 8UL); ASSERT_EQ(lod_tensor->NumElements(2), 8UL);
} }
TEST_F(LODTensorTester, SliceShared_Level) { TEST_F(LODTensorTester, SliceLevels) {
// slice 1 level
for (size_t level = 0; level < 3UL; ++level) {
auto new_lod_tensor = lod_tensor->SliceShared(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
}
// slice 2 level
for (size_t level = 0; level < 2UL; ++level) {
auto new_lod_tensor = lod_tensor->SliceShared(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
ASSERT_EQ(new_lod_tensor.NumElements(1),
lod_tensor->NumElements(level + 1));
ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
}
}
TEST_F(LODTensorTester, SliceCopied_Level) {
// slice 1 level // slice 1 level
for (size_t level = 0; level < 3UL; ++level) { for (size_t level = 0; level < 3UL; ++level) {
auto new_lod_tensor = auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 1);
lod_tensor->SliceCopied<float>(level, level + 1, place);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
// ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); // ASSERT_EQ(new_lod_tensor, *lod_tensor);
// TODO(superjom) add tensor comparison here.
} }
// slice 2 level // slice 2 level
for (size_t level = 0; level < 2UL; ++level) { for (size_t level = 0; level < 2UL; ++level) {
auto new_lod_tensor = auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 2);
lod_tensor->SliceCopied<float>(level, level + 2, place);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
ASSERT_EQ(new_lod_tensor.NumElements(1), ASSERT_EQ(new_lod_tensor.NumElements(1),
lod_tensor->NumElements(level + 1)); lod_tensor->NumElements(level + 1));
// ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
// TODO(superjom) add tensor comparison here.
} }
} }
TEST_F(LODTensorTester, SliceShared_Element) { TEST_F(LODTensorTester, SliceInLevel) {
size_t level = 0;
auto new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL);
ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
level = 1;
new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
}
TEST_F(LODTensorTester, SliceCopied_Element) {
size_t level = 0; size_t level = 0;
auto new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place); auto new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL); EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
level = 1; level = 1;
new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place); new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
level = 1;
// LOD is
// 0 5 10
// 0 2 5 7 10
new_lod_tensor = lod_tensor->SliceCopied<float>(level, 1, 3, place);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(new_lod_tensor.lod_element(0, 0), 0UL);
ASSERT_EQ(new_lod_tensor.lod_element(0, 1), 5UL);
ASSERT_EQ(new_lod_tensor.lod_element(1, 0), 0UL);
ASSERT_EQ(new_lod_tensor.lod_element(1, 1), 2UL);
ASSERT_EQ(new_lod_tensor.lod_element(1, 2), 5UL);
ASSERT_EQ(new_lod_tensor.lod_element(1, 3), 7UL);
// TODO(superjom) compare the content of these tensors
} }
TEST_F(LODTensorTester, ShareLOD) { TEST_F(LODTensorTester, ShareLOD) {
LODTensor new_lod_tensor; LODTensor new_lod_tensor;
new_lod_tensor.ShareLOD(*lod_tensor); new_lod_tensor.CopyLOD(*lod_tensor);
ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod()); ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod());
} }
TEST_F(LODTensorTester, CopyLOD) { TEST_F(LODTensorTester, CopyLOD) {
LODTensor new_lod_tensor; LODTensor new_lod_tensor;
new_lod_tensor.CopyLOD(*lod_tensor); new_lod_tensor.CopyLOD(*lod_tensor);
ASSERT_NE(new_lod_tensor.lod(), lod_tensor->lod()); bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(),
new_lod_tensor.lod().begin());
ASSERT_TRUE(equals);
} }
} // namespace framework } // namespace framework
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
package paddle.framework;
import "attribute.proto";
// AttrDesc is used to describe Attributes of an Operator. It contains
// name, type, and value of Attribute.
//
// e.g., for scale=3.0: name=scale, type=AttrType.FLOAT, value=3.0
message AttrDesc {
required string name = 1;
required AttrType type = 2;
optional int32 i = 3;
optional float f = 4;
optional string s = 5;
repeated int32 ints = 6;
repeated float floats = 7;
repeated string strings = 8;
};
// Protocol Message to describe an Operator.
//
// In PaddlePaddle, Operator is used to do a certain computation such
// as "add", "sub", "cosine", etc.
// (1) Operator needs to know the input and output variable names.
// (2) Some ops may have special attributes such as "scale" in "CosineOp".
//
// 3rd-party language can build this proto message and call
// AddOp(const OpDesc& op_desc) of Paddle core to create an Operator.
message OpDesc {
// input names of this Operator.
repeated string inputs = 1;
// output names of this Operator.
repeated string outputs = 2;
// type of this Operator, such as "add", "sub", "fc".
required string type = 3;
// Attributes of this Operator. e.g., scale=3.0 in cosine op.
repeated AttrDesc attrs = 4;
};
\ No newline at end of file
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/framework/op_desc.pb.h>
TEST(OpDesc, Create) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("add");
op_desc.add_inputs("X");
op_desc.add_inputs("Y");
op_desc.add_outputs("Z");
auto attr = op_desc.mutable_attrs()->Add();
attr->set_type(paddle::framework::AttrType::FLOAT);
attr->set_f(3.14);
// required field name is not set, so IsInitialized should be false.
ASSERT_FALSE(op_desc.IsInitialized());
attr->set_name("add");
// after all required fields are set, IsInitialized should be true now.
ASSERT_TRUE(op_desc.IsInitialized());
}
\ No newline at end of file
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Protocol Message for 3rd-party language binding.
//
// Paddle Python package will use `OpProto` to generate op creation methods.
// The op creation methods take user's input and generate `OpDesc` proto
// message,
// then pass `OpDesc` to C++ side and create Op pointer.
//
syntax = "proto2";
package paddle.framework;
import "attribute.proto";
// Attribute protocol message for 3rd-party language binding.
// It stores which attributes the Op supports and their types.
message AttrProto {
// Supported attribute name. e.g. `scale` for cosine op.
required string name = 1;
// Supported attribute type.
required AttrType type = 2;
// Supported attribute comments. It helps 3rd-party language generate
// doc-string.
required string comment = 3;
// If that attribute is generated, it means the Paddle third-party language
// binding is responsible for filling in that attribute. End users should
// not set that attribute.
optional bool generated = 4 [ default = false ];
}
// Input or output message for 3rd-party language binding.
// It contains parameter name and its comments.
message VarProto {
// Input or output name in that op creation function.
// e.g. `cos(a, b, output, ...)`, "a", "b", "output" are names.
required string name = 1;
// The comment for that input. It helps 3rd-party language generate
// doc-string.
required string comment = 2;
// Whether that input/output could be a list or not.
// If so, that Op should have an attribute named `input_format` or
// `output_format`.
//
// e.g.
// If the op is a fc op, the inputs are `X`, `W`, `b`. `X` and `W`
// could be multiple, so `multiple` is set to True for `X` and `W`, and OpDesc
// will hold an attribute describing them.
//
// The OpDesc of the same fc could be
// {
// "type": "fc",
// "input": ["X1", "X2", "W1", "W2", "b"],
// "output": "fc.out",
// "attrs" : {
// "input_format": [0, 2, 4, 5]
// }
// }
//
optional bool multiple = 3 [ default = false ];
// It marks that the output is a temporary output. That output is not used by
// the user, but is used internally by other ops as input. If no other op uses
// that output, it could be optimized away early.
//
// Attribute temporary_index will be set in OpDesc if some
// outputs are temporary.
//
// output = [ "xxx.out1", "xxx.tmp", "xxx.out2"],
// attrs = {
// "temporary_index": [1]
// }
optional bool temporary = 4 [ default = false ];
// The gradient of the operator can be ignored immediately,
// e.g. for operator AddOp, y = x1 + x2, the gradients dy/dx1 and dy/dx2
// can be ignored for future graph optimization.
optional bool ignore_gradient = 6;
}
// Op protocol message for 3rd-party language binding.
// It contains all information for generating op creation method.
message OpProto {
// The input information to generate op creation method.
repeated VarProto inputs = 1;
// The output information to generate op creation method.
repeated VarProto outputs = 2;
// The attribute information to generate op creation method.
repeated AttrProto attrs = 3;
// The comments for that Op. It helps 3rd-party language generate
// doc-string. The whole documentation of that Op is generated by comment,
// inputs, outputs, attrs together.
required string comment = 4;
// The type of that Op.
required string type = 5;
}
#include <gtest/gtest.h>
#include <paddle/framework/op_proto.pb.h>
TEST(TestOpProto, ALL) {
paddle::framework::OpProto proto;
{
auto ipt = proto.mutable_inputs()->Add();
*ipt->mutable_name() = "a";
*ipt->mutable_comment() = "the one input of cosine op";
}
{
auto ipt = proto.mutable_inputs()->Add();
*ipt->mutable_name() = "b";
*ipt->mutable_comment() = "the other input of cosine op";
}
{
auto opt = proto.mutable_outputs()->Add();
*opt->mutable_name() = "output";
*opt->mutable_comment() = "the output of cosine op";
}
{
auto attr = proto.mutable_attrs()->Add();
*attr->mutable_name() = "scale";
attr->set_type(paddle::framework::AttrType::FLOAT);
*attr->mutable_comment() = "the scale attribute of cosine op";
}
proto.set_type("cos");
*proto.mutable_comment() = "cosine op, output = scale * cos(a, b)";
ASSERT_TRUE(proto.IsInitialized());
}
\ No newline at end of file
...@@ -21,8 +21,9 @@ limitations under the License. */ ...@@ -21,8 +21,9 @@ limitations under the License. */
#include <unordered_map> #include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include "paddle/framework/attribute.h" #include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/grad_op_builder.h" #include "paddle/framework/grad_op_builder.h"
#include "paddle/framework/op_desc.pb.h" #include "paddle/framework/operator.h"
#include "paddle/framework/scope.h" #include "paddle/framework/scope.h"
namespace paddle { namespace paddle {
...@@ -45,52 +46,48 @@ class OpProtoAndCheckerMaker { ...@@ -45,52 +46,48 @@ class OpProtoAndCheckerMaker {
protected: protected:
struct VariableBuilder { struct VariableBuilder {
VarProto* var_; OpProto::Var* var_;
std::function<void()> on_multiple_;
std::function<void()> on_temporary_;
VariableBuilder& SetMultiple() { VariableBuilder& AsDuplicable() {
var_->set_multiple(true); var_->set_duplicable(true);
on_multiple_();
return *this; return *this;
} }
VariableBuilder& SetTemporary() { VariableBuilder& AsIntermediate() {
PADDLE_ENFORCE(bool(on_temporary_), "Cannot set temporary"); var_->set_intermediate(true);
var_->set_temporary(true);
on_temporary_();
return *this; return *this;
} }
VariableBuilder& IgnoreGradient() { // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it
var_->set_ignore_gradient(true); // means that the input/output is not needed when calculating the gradient. It does
// not mean there is no gradient in the backward pass. It should be changed soon.
VariableBuilder& AsNoGradient() {
var_->set_no_gradient(true);
return *this; return *this;
} }
}; };
VariableBuilder AddInput(const std::string& name, VariableBuilder AddInput(const std::string& name,
const std::string& comment) { const std::string& comment) {
VarProto* input = proto_->add_inputs(); auto* input = proto_->add_inputs();
input->set_name(name); input->set_name(name);
input->set_comment(comment); input->set_comment(comment);
return VariableBuilder{input, [=] { this->SetHasMultipleInput(); }, return VariableBuilder{input};
nullptr};
} }
VariableBuilder AddOutput(const std::string& name, VariableBuilder AddOutput(const std::string& name,
const std::string& comment) { const std::string& comment) {
VarProto* output = proto_->add_outputs(); auto* output = proto_->add_outputs();
output->set_name(name); output->set_name(name);
output->set_comment(comment); output->set_comment(comment);
return VariableBuilder{output, [=] { this->SetHasMultipleOutput(); }, return VariableBuilder{output};
[=] { this->SetHasTemporaryOutput(); }};
} }
template <typename T> template <typename T>
TypedAttrChecker<T>& AddAttr(const std::string& name, TypedAttrChecker<T>& AddAttr(const std::string& name,
const std::string& comment, const std::string& comment,
bool generated = false) { bool generated = false) {
AttrProto* attr = proto_->add_attrs(); auto* attr = proto_->add_attrs();
attr->set_name(name); attr->set_name(name);
attr->set_comment(comment); attr->set_comment(comment);
attr->set_generated(generated); attr->set_generated(generated);
...@@ -101,53 +98,6 @@ class OpProtoAndCheckerMaker { ...@@ -101,53 +98,6 @@ class OpProtoAndCheckerMaker {
void AddComment(const std::string& comment) { proto_->set_comment(comment); } void AddComment(const std::string& comment) { proto_->set_comment(comment); }
private: private:
void SetHasMultiple(const std::string& in_out, bool* flag) {
if (!*flag) {
AddAttr<std::vector<int>>(in_out + "_format",
"The multiple index of " + in_out +
"\n"
R"DOC(
This attribute is used by Paddle core framework. Paddle's Op support each input
or output could be a list of variable. This attribute is used to show how that
list organized.
e.g.
input = ["a", "b", "c", "d", "e", "f"]
input_format = [0, 4, 5, 6]
means
The number of all input variables this op is six, and they are segmented into
three inputs.
The first input is input[0:4], second is input[4:5], third is input[5:6].
)DOC",
/*generated*/ true);
*flag = true;
}
}
void SetHasMultipleInput() { SetHasMultiple("input", &has_multiple_input_); }
void SetHasMultipleOutput() {
SetHasMultiple("output", &has_multiple_output_);
}
void SetHasTemporaryOutput() {
if (!has_temporary_output_) {
AddAttr<std::vector<int>>("temporary_index",
R"DOC(The temporary index of output.
Not all output of Paddle Op is used by user. For faster computation, each op
could output some its internal state to other op, other op could take that
output to make compute faster.
Add a mark to which output is temporary is helpful for future optimization.
)DOC",
/*generated*/ true)
.SetDefault(std::vector<int>());
has_temporary_output_ = true;
}
}
void CheckNoDuplicatedInOutAttrs() { void CheckNoDuplicatedInOutAttrs() {
std::unordered_set<std::string> names; std::unordered_set<std::string> names;
auto checker = [&](const std::string& name) { auto checker = [&](const std::string& name) {
...@@ -168,9 +118,6 @@ Add a mark to which output is temporary is helpful for future optimization. ...@@ -168,9 +118,6 @@ Add a mark to which output is temporary is helpful for future optimization.
OpProto* proto_; OpProto* proto_;
OpAttrChecker* op_checker_; OpAttrChecker* op_checker_;
bool validated_{false}; bool validated_{false};
bool has_multiple_input_{false};
bool has_multiple_output_{false};
bool has_temporary_output_{false};
}; };
class NOPMaker : public OpProtoAndCheckerMaker { class NOPMaker : public OpProtoAndCheckerMaker {
...@@ -187,8 +134,10 @@ struct OpInfo { ...@@ -187,8 +134,10 @@ struct OpInfo {
}; };
class OpRegistry { class OpRegistry {
using VarIndexMap = std::unordered_map<std::string, int>; using VarNameMap = OperatorBase::VarNameMap;
using VarNameList = std::vector<std::string>; using OpCreator = std::function<OperatorBase*(
const std::string& /*type*/, const VarNameMap& /*inputs*/,
const VarNameMap& /*outputs*/, const AttributeMap& /*attrs*/)>;
public: public:
template <typename OpType, typename ProtoMakerType, typename GradOpType> template <typename OpType, typename ProtoMakerType, typename GradOpType>
...@@ -197,7 +146,11 @@ class OpRegistry { ...@@ -197,7 +146,11 @@ class OpRegistry {
PADDLE_ENFORCE(op_info_map().count(op_type) == 0, PADDLE_ENFORCE(op_info_map().count(op_type) == 0,
"'%s' is registered more than once.", op_type); "'%s' is registered more than once.", op_type);
OpInfo op_info; OpInfo op_info;
op_info.creator_ = [] { return new OpType; }; op_info.creator_ = [](const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs,
const AttributeMap& attrs) {
return new OpType(type, inputs, outputs, attrs);
};
op_info.grad_op_type_ = grad_op_type; op_info.grad_op_type_ = grad_op_type;
if (std::type_index(typeid(ProtoMakerType)) != if (std::type_index(typeid(ProtoMakerType)) !=
std::type_index(typeid(NOPMaker))) { std::type_index(typeid(NOPMaker))) {
...@@ -210,18 +163,6 @@ class OpRegistry { ...@@ -210,18 +163,6 @@ class OpRegistry {
op_info.proto_->IsInitialized(), op_info.proto_->IsInitialized(),
"Fail to initialize %s's OpProto, because %s is not initialized", "Fail to initialize %s's OpProto, because %s is not initialized",
op_type, op_info.proto_->InitializationErrorString()); op_type, op_info.proto_->InitializationErrorString());
// ======will be refactored in following PRs============ //
VarIndexMaps()[op_type].reset(new VarIndexMap());
auto& varmap = *VarIndexMaps()[op_type];
int idx = 0;
for (auto& var : op_info.proto_->inputs()) {
varmap[var.name()] = idx++;
}
idx = 0;
for (auto& var : op_info.proto_->outputs()) {
varmap[var.name()] = idx++;
}
// ================================================ //
} else { } else {
op_info.proto_ = nullptr; op_info.proto_ = nullptr;
op_info.checker_ = nullptr; op_info.checker_ = nullptr;
...@@ -238,41 +179,29 @@ class OpRegistry { ...@@ -238,41 +179,29 @@ class OpRegistry {
const VarNameList& outputs, const VarNameList& outputs,
const AttributeMap& attrs) { const AttributeMap& attrs) {
auto it = op_info_map().find(type); auto it = op_info_map().find(type);
PADDLE_ENFORCE(it != op_info_map().end(), "'%s' has not been registered.", PADDLE_ENFORCE(it != op_info_map().end(),
type); "Operator '%s' has not been registered.", type);
it->second.checker_->Check(attrs);
auto op = it->second.creator_(); auto op = it->second.creator_(type, inputs, outputs, attrs);
op->type_ = type; return std::shared_ptr<OperatorBase>(op);
op->inputs_ = inputs; }
op->outputs_ = outputs;
op->attrs_ = attrs;
it->second.checker_->Check(op->attrs_);
GenerateTempVariableName(op);
{ static VarNameMap ConvertOpDescVarsToVarNameMap(
auto var_index_it = VarIndexMaps().find(type); const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars) {
if (var_index_it != VarIndexMaps().end()) { VarNameMap ret_val;
op->in_out_idxs_ = var_index_it->second; for (auto& var : op_desc_vars) {
} auto& var_names = ret_val[var.parameter()];
auto& var_names_in_proto = var.arguments();
var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
std::back_inserter(var_names));
} }
return ret_val;
op->Init();
return std::shared_ptr<OperatorBase>(op);
} }
static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc) { static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc) {
std::vector<std::string> inputs; VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
inputs.reserve((size_t)op_desc.inputs_size()); VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs());
std::copy(op_desc.inputs().begin(), op_desc.inputs().end(),
std::back_inserter(inputs));
std::vector<std::string> outputs;
outputs.reserve((size_t)op_desc.outputs_size());
std::copy(op_desc.outputs().begin(), op_desc.outputs().end(),
std::back_inserter(outputs));
AttributeMap attrs; AttributeMap attrs;
for (auto& attr : op_desc.attrs()) { for (auto& attr : op_desc.attrs()) {
attrs[attr.name()] = GetAttrValue(attr); attrs[attr.name()] = GetAttrValue(attr);
...@@ -285,7 +214,6 @@ class OpRegistry { ...@@ -285,7 +214,6 @@ class OpRegistry {
PADDLE_ENFORCE(!op.IsNetOp(), PADDLE_ENFORCE(!op.IsNetOp(),
"Use framework::Backward to get backward ops"); "Use framework::Backward to get backward ops");
std::shared_ptr<OperatorBase> grad_op(BuildGradOp(&op)); std::shared_ptr<OperatorBase> grad_op(BuildGradOp(&op));
grad_op->Init();
return grad_op; return grad_op;
} }
...@@ -293,35 +221,17 @@ class OpRegistry { ...@@ -293,35 +221,17 @@ class OpRegistry {
static std::unordered_map<std::string, const OpInfo> op_info_map_; static std::unordered_map<std::string, const OpInfo> op_info_map_;
return op_info_map_; return op_info_map_;
} }
static std::unordered_map<std::string, std::shared_ptr<VarIndexMap>>&
VarIndexMaps() {
static std::unordered_map<std::string, std::shared_ptr<VarIndexMap>> maps_;
return maps_;
}
private:
static void GenerateTempVariableName(OperatorBase* op) {
static std::atomic<size_t> gUniqId(0UL);
for (auto& outname : op->outputs_) {
if (outname == kTempVarName) {
outname += op->type_;
outname += "@";
outname += std::to_string(gUniqId.fetch_add(1));
}
}
}
}; };
class Registrar { class Registrar {
public: public:
// In our design, various kinds of classes, e.g., operators and kernels, have // In our design, various kinds of classes, e.g., operators and kernels,
// their corresponding registry and registrar. The action of registration is // have their corresponding registry and registrar. The action of
// in the constructor of a global registrar variable, which, however, are not // registration is in the constructor of a global registrar variable, which,
// used in the code that calls package framework, and would be removed from // however, are not used in the code that calls package framework, and would
// the generated binary file by the linker. To avoid such removal, we add // be removed from the generated binary file by the linker. To avoid such
// Touch to all registrar classes and make USE_OP macros to call this // removal, we add Touch to all registrar classes and make USE_OP macros to
// method. So, as long as the callee code calls USE_OP, the global // call this method. So, as long as the callee code calls USE_OP, the global
// registrar variable won't be removed by the linker. // registrar variable won't be removed by the linker.
void Touch() {} void Touch() {}
}; };
...@@ -387,16 +297,6 @@ class OpKernelRegistrar : public Registrar { ...@@ -387,16 +297,6 @@ class OpKernelRegistrar : public Registrar {
return 0; \ return 0; \
} }
/**
* Macro to Forbid user register Gradient Operator.
*/
/*
#define NO_GRADIENT(op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_gradient_op__##op_type##_##op_type##_grad, \
"NO_GRADIENT must be called in global namespace")
*/
#define REGISTER_OP_GPU_KERNEL(op_type, ...) \ #define REGISTER_OP_GPU_KERNEL(op_type, ...) \
REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__) REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
......
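The Registrar comment above explains why a Touch()/USE_OP pair exists at all. The stand-alone sketch below (names invented, not the real Paddle macros; in the real setting the two halves live in different libraries rather than one file) shows the bare mechanism: a static registrar object whose constructor performs the registration, and a touch function that user code references so the linker cannot drop the object file containing it.

// Illustrative sketch of the Touch()/USE_OP linker trick; not Paddle code.
#include <cstdio>

struct DemoRegistrar {
  DemoRegistrar() { std::printf("demo op registered\n"); }  // registration side effect
  void Touch() {}  // intentionally empty; only gives callers a symbol to reference
};

// --- would live in the library that defines the op ---------------------------
static DemoRegistrar g_demo_registrar;  // constructor runs at program start-up
int TouchDemoOpRegistrar() {            // referenced by USE_DEMO_OP below
  g_demo_registrar.Touch();
  return 0;
}

// --- would live in the code that uses the op ---------------------------------
// Referencing TouchDemoOpRegistrar forces the linker to keep the object file
// containing g_demo_registrar, so its constructor (the registration) runs.
// Real code would additionally mark the variable as unused to silence warnings.
#define USE_DEMO_OP()                 \
  extern int TouchDemoOpRegistrar();  \
  static int g_use_demo_op = TouchDemoOpRegistrar()

USE_DEMO_OP();

int main() { return 0; }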
...@@ -7,8 +7,7 @@ namespace paddle { ...@@ -7,8 +7,7 @@ namespace paddle {
namespace framework { namespace framework {
class CosineOp : public OperatorBase { class CosineOp : public OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(CosineOp, OperatorBase) using OperatorBase::OperatorBase;
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {} const platform::DeviceContext& dev_ctx) const override {}
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
...@@ -29,8 +28,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -29,8 +28,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
class MyTestOp : public OperatorBase { class MyTestOp : public OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(MyTestOp, OperatorBase) using OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {} const platform::DeviceContext& dev_ctx) const override {}
...@@ -40,8 +38,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -40,8 +38,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
public: public:
MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of cosine op").SetMultiple(); AddInput("input", "input of cosine op").AsDuplicable();
AddOutput("output", "output of cosine op").SetTemporary(); AddOutput("output", "output of cosine op").AsIntermediate();
auto my_checker = [](int i) { auto my_checker = [](int i) {
PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!"); PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!");
}; };
...@@ -53,6 +51,14 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -53,6 +51,14 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
static void BuildVar(const std::string& param_name,
std::initializer_list<const char*> arguments,
paddle::framework::OpDesc::Var* var) {
var->set_parameter(param_name);
for (auto& arg_name : arguments) {
var->add_arguments(arg_name);
}
}
REGISTER_OP_WITHOUT_GRADIENT(cos_sim, paddle::framework::CosineOp, REGISTER_OP_WITHOUT_GRADIENT(cos_sim, paddle::framework::CosineOp,
paddle::framework::CosineOpProtoAndCheckerMaker); paddle::framework::CosineOpProtoAndCheckerMaker);
REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp, REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp,
...@@ -61,8 +67,8 @@ REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp, ...@@ -61,8 +67,8 @@ REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp,
TEST(OpRegistry, CreateOp) { TEST(OpRegistry, CreateOp) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
float scale = 3.3; float scale = 3.3;
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
...@@ -82,8 +88,8 @@ TEST(OpRegistry, CreateOp) { ...@@ -82,8 +88,8 @@ TEST(OpRegistry, CreateOp) {
TEST(OpRegistry, IllegalAttr) { TEST(OpRegistry, IllegalAttr) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
...@@ -107,8 +113,8 @@ TEST(OpRegistry, IllegalAttr) { ...@@ -107,8 +113,8 @@ TEST(OpRegistry, IllegalAttr) {
TEST(OpRegistry, DefaultValue) { TEST(OpRegistry, DefaultValue) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
ASSERT_TRUE(op_desc.IsInitialized()); ASSERT_TRUE(op_desc.IsInitialized());
...@@ -120,20 +126,11 @@ TEST(OpRegistry, DefaultValue) { ...@@ -120,20 +126,11 @@ TEST(OpRegistry, DefaultValue) {
ASSERT_EQ(op->GetAttr<float>("scale"), 1.0); ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
} }
static void SetInputFormat(paddle::framework::OpDesc* desc) {
auto attr = desc->add_attrs();
attr->set_name("input_format");
attr->set_type(paddle::framework::INTS);
attr->mutable_ints()->Add(0);
attr->mutable_ints()->Add(1);
}
TEST(OpRegistry, CustomChecker) { TEST(OpRegistry, CustomChecker) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("my_test_op"); op_desc.set_type("my_test_op");
op_desc.add_inputs("ii"); BuildVar("input", {"ii"}, op_desc.add_inputs());
op_desc.add_outputs("oo"); BuildVar("output", {"oo"}, op_desc.add_outputs());
SetInputFormat(&op_desc);
// attr 'test_attr' is not set // attr 'test_attr' is not set
bool caught = false; bool caught = false;
...@@ -173,7 +170,6 @@ TEST(OpRegistry, CustomChecker) { ...@@ -173,7 +170,6 @@ TEST(OpRegistry, CustomChecker) {
attr->set_name("test_attr"); attr->set_name("test_attr");
attr->set_type(paddle::framework::AttrType::INT); attr->set_type(paddle::framework::AttrType::INT);
attr->set_i(4); attr->set_i(4);
SetInputFormat(&op_desc);
auto op = paddle::framework::OpRegistry::CreateOp(op_desc); auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
paddle::framework::Scope scope; paddle::framework::Scope scope;
......
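Several ops in this file (CosineOp, MyTestOp, and NOP over in operator.h) replace the DEFINE_OPERATOR_CTOR macro with `using OperatorBase::OperatorBase;`. A tiny sketch of the language feature this relies on, with made-up types standing in for the operator classes:

// Inheriting constructors vs. hand-written forwarding; illustrative only.
#include <string>

struct Base {
  Base(const std::string& type, int n) : type_(type), n_(n) {}
  std::string type_;
  int n_;
};

// Roughly what DEFINE_OPERATOR_CTOR expanded to: forward every argument by hand.
struct OldStyle : Base {
  OldStyle(const std::string& type, int n) : Base(type, n) {}
};

// What `using OperatorBase::OperatorBase;` does: re-expose all Base constructors,
// so derived ops need no boilerplate when the base signature changes.
struct NewStyle : Base {
  using Base::Base;
};

int main() {
  OldStyle a("cos_sim", 1);
  NewStyle b("cos_sim", 2);
  return (a.n_ + b.n_) - 3;  // 0
}

The inherited constructor is why dropping in_out_idxs_ from OperatorBase required no edits to the individual op classes beyond the one-line `using` declaration.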
...@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm>
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
#include <algorithm>
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -34,83 +34,134 @@ ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const { ...@@ -34,83 +34,134 @@ ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
#endif #endif
const std::string& OperatorBase::Input(const std::string& name) const { const std::string& OperatorBase::Input(const std::string& name) const {
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, auto& ins = Inputs(name);
"Input Output Indices could not be nullptr"); PADDLE_ENFORCE_EQ(ins.size(), 1UL,
auto it = in_out_idxs_->find(name); "Op %s input %s should contain only one variable", type_,
PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", name);
name); return ins[0];
if (attrs_.count("input_format") == 0) {
return inputs_.at((size_t)it->second);
} else {
const auto& input_format = GetAttr<std::vector<int>>("input_format");
int idx = input_format[it->second];
return inputs_.at((size_t)idx);
}
} }
std::vector<std::string> OperatorBase::Inputs(const std::string& name) const { const std::vector<std::string>& OperatorBase::Inputs(
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "IO Idx could not be nullptr"); const std::string& name) const {
auto input_format = GetAttr<std::vector<int>>("input_format"); auto it = inputs_.find(name);
auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(it != inputs_.end(), "Op %s does not have input %s", type_,
PADDLE_ENFORCE(input_format.at(static_cast<size_t>(offset) + 1) <= name);
static_cast<int>(inputs_.size()), return it->second;
"Input Out Of Range");
return std::vector<std::string>{
inputs_.begin() + input_format.at(offset),
inputs_.begin() + input_format.at(offset + 1)};
} }
const std::string& OperatorBase::Output(const std::string& name) const { const std::string& OperatorBase::Output(const std::string& name) const {
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto& outs = Outputs(name);
auto it = in_out_idxs_->find(name); PADDLE_ENFORCE_EQ(outs.size(), 1UL,
PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", "Op %s output %s should contain only one variable", type_,
name); name);
if (attrs_.count("output_format") == 0) { return outs[0];
return outputs_.at((size_t)it->second);
} else {
const auto& output_format = GetAttr<std::vector<int>>("output_format");
int idx = output_format[it->second];
return outputs_.at((size_t)idx);
}
} }
std::vector<std::string> OperatorBase::Outputs(const std::string& name) const { const std::vector<std::string>& OperatorBase::Outputs(
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); const std::string& name) const {
auto output_format = GetAttr<std::vector<int>>("output_format"); auto it = outputs_.find(name);
auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
PADDLE_ENFORCE(output_format.at(static_cast<size_t>(offset) + 1) <= name);
static_cast<int>(outputs_.size()), return it->second;
"Output Out of Range");
return std::vector<std::string>{
outputs_.begin() + output_format.at(offset),
outputs_.begin() + output_format.at(offset + 1)};
} }
std::string OperatorBase::DebugString() const { std::string OperatorBase::DebugString() const {
std::stringstream ss; std::stringstream ss;
ss << "Op(" << type_ << "), inputs:("; ss << "Op(" << type_ << "), inputs:{";
for (size_t i = 0; i < inputs_.size(); ++i) { for (auto it = inputs_.begin(); it != inputs_.end();) {
ss << inputs_[i]; auto& input = *it;
if (i != inputs_.size() - 1) { ss << input.first << "[";
for (size_t i = 0; i < input.second.size(); ++i) {
ss << input.second[i];
if (i != input.second.size() - 1) {
ss << ", ";
}
}
ss << "]";
++it;
if (it != inputs_.end()) {
ss << ", "; ss << ", ";
} }
} }
ss << "), outputs:("; ss << "}, outputs:{";
for (size_t i = 0; i < outputs_.size(); ++i) { for (auto it = outputs_.begin(); it != outputs_.end();) {
ss << outputs_[i]; auto& output = *it;
if (i != outputs_.size() - 1) { ss << output.first << "[";
for (size_t i = 0; i < output.second.size(); ++i) {
ss << output.second[i];
if (i != output.second.size() - 1) {
ss << ", ";
}
}
ss << "]";
++it;
if (it != outputs_.end()) {
ss << ", "; ss << ", ";
} }
} }
ss << ")."; ss << "}.";
return ss.str(); return ss.str();
} }
void OperatorBase::Rename(const std::string& old_name, void OperatorBase::Rename(const std::string& old_name,
const std::string& new_name) { const std::string& new_name) {
std::replace(inputs_.begin(), inputs_.end(), old_name, new_name); for (auto& input : inputs_) {
std::replace(outputs_.begin(), outputs_.end(), old_name, new_name); std::replace(input.second.begin(), input.second.end(), old_name, new_name);
}
for (auto& output : outputs_) {
std::replace(output.second.begin(), output.second.end(), old_name,
new_name);
}
}
OperatorBase::OperatorBase(const std::string& type,
const OperatorBase::VarNameMap& inputs,
const OperatorBase::VarNameMap& outputs,
const AttributeMap& attrs)
: type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
static std::atomic<size_t> gUniqId(0UL);
for (auto& output : outputs_) {
for (auto& output_name : output.second) {
if (output_name == kTempVarName) {
output_name += type_;
output_name += "@";
output_name += std::to_string(gUniqId.fetch_add(1));
}
}
}
}
std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
std::vector<std::string> ret_val;
if (has_intermediate) {
// push all outputs into ret_val
for (auto& o : outputs_) {
ret_val.reserve(ret_val.size() + o.second.size());
ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
}
return ret_val;
}
auto it = OpRegistry::op_info_map().find(type_);
PADDLE_ENFORCE(
it != OpRegistry::op_info_map().end(),
"Operator %s not registered, cannot figure out intermediate outputs",
type_);
PADDLE_ENFORCE(
it->second.proto_ != nullptr,
"Operator %s has no OpProto, cannot figure out intermediate outputs",
type_);
// get all OpProto::Var for outputs
for (auto& o : it->second.proto_->outputs()) {
// ignore all intermediate output
if (o.intermediate()) continue;
auto out = outputs_.find(o.name());
if (out != outputs_.end()) {
ret_val.reserve(ret_val.size() + out->second.size());
ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
}
}
return ret_val;
} }
} // namespace framework } // namespace framework
......
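The core of the operator.cc rewrite above is the input/output representation: the removed Input()/Inputs() resolved names through a flat vector plus an `input_format` offset attribute, while the new code keys a map by parameter name. A self-contained sketch (variable names invented, reusing the fc example from the removed op_proto.proto) contrasting the two lookups:

// Illustrative comparison only; not part of the diff.
#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  // Old scheme: one flat list, an "input_format" attribute holding segment
  // offsets, and in_out_idxs_ mapping each parameter to its segment index.
  std::vector<std::string> old_inputs = {"X1", "X2", "W1", "W2", "b"};
  std::vector<int> input_format = {0, 2, 4, 5};  // segments [0,2) [2,4) [4,5)
  std::map<std::string, int> in_out_idxs = {{"X", 0}, {"W", 1}, {"b", 2}};

  // Old Inputs("W"): slice the flat list between two offsets.
  int offset = in_out_idxs["W"];
  std::vector<std::string> old_w(old_inputs.begin() + input_format[offset],
                                 old_inputs.begin() + input_format[offset + 1]);

  // New scheme: a VarNameMap groups the names by parameter up front,
  // so Inputs("W") is a plain map lookup.
  std::map<std::string, std::vector<std::string>> new_inputs = {
      {"X", {"X1", "X2"}}, {"W", {"W1", "W2"}}, {"b", {"b"}}};

  assert(old_w == new_inputs["W"]);  // both yield {"W1", "W2"}
  return 0;
}

The map-based form is what ConvertOpDescVarsToVarNameMap in op_registry.h now builds directly from OpDesc::Var messages, which is why the input_format/output_format attributes and in_out_idxs_ could be deleted.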
...@@ -20,8 +20,7 @@ limitations under the License. */ ...@@ -20,8 +20,7 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/framework/attribute.h" #include "paddle/framework/attribute.h"
#include "paddle/framework/op_desc.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_proto.pb.h"
#include "paddle/framework/scope.h" #include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h" #include "paddle/platform/device_context.h"
...@@ -55,16 +54,6 @@ class OperatorBase; ...@@ -55,16 +54,6 @@ class OperatorBase;
class InferShapeContext; class InferShapeContext;
class ExecutionContext; class ExecutionContext;
#define DEFINE_OPERATOR_CTOR(Class, ParentClass) \
public: \
Class() { /* TODO(yi): This constructor is to be removed. */ \
} \
Class(const std::string& type, const std::vector<std::string>& inputs, \
const std::vector<std::string>& outputs, \
const ::paddle::framework::AttributeMap& attrs, \
std::unordered_map<std::string, int>* in_out_idxs) \
: ParentClass(type, inputs, outputs, attrs, in_out_idxs) {}
/** /**
* OperatorBase has the basic element that Net will call to do computation. * OperatorBase has the basic element that Net will call to do computation.
* Only CreateOperator from OpRegistry will new Operator directly. User * Only CreateOperator from OpRegistry will new Operator directly. User
...@@ -73,16 +62,14 @@ class ExecutionContext; ...@@ -73,16 +62,14 @@ class ExecutionContext;
*/ */
class OperatorBase { class OperatorBase {
public: public:
OperatorBase() {} // TODO(yi): This constructor is to be removed. using VarNameMap = std::map<std::string, std::vector<std::string>>;
OperatorBase(const std::string& type, const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs, OperatorBase(const std::string& type, const VarNameMap& inputs,
const AttributeMap& attrs, const VarNameMap& outputs, const AttributeMap& attrs);
std::unordered_map<std::string, int>* in_out_idxs)
: type_(type), OperatorBase(const OperatorBase& o) = delete;
inputs_(inputs), OperatorBase& operator=(const OperatorBase& o) = delete;
outputs_(outputs), OperatorBase(OperatorBase&& o) = delete;
attrs_(attrs),
in_out_idxs_(in_out_idxs) {}
virtual ~OperatorBase() {} virtual ~OperatorBase() {}
...@@ -95,10 +82,6 @@ class OperatorBase { ...@@ -95,10 +82,6 @@ class OperatorBase {
virtual std::string DebugString() const; virtual std::string DebugString() const;
/// Init will be called after CreateOperator, you can put some initialization
/// logic here.
virtual void Init() {}
/// InferShape infer the size of Variables used by this Operator with /// InferShape infer the size of Variables used by this Operator with
/// information inside scope /// information inside scope
virtual void InferShape(const Scope& scope) const = 0; virtual void InferShape(const Scope& scope) const = 0;
...@@ -117,22 +100,18 @@ class OperatorBase { ...@@ -117,22 +100,18 @@ class OperatorBase {
//! Get an input with argument's name described in `op_proto` //! Get an input with argument's name described in `op_proto`
const std::string& Input(const std::string& name) const; const std::string& Input(const std::string& name) const;
//! Get an input which has multiple variables. //! Get an input which has multiple variables.
//! TODO add a vector_view to prevent memory copy. const std::vector<std::string>& Inputs(const std::string& name) const;
std::vector<std::string> Inputs(const std::string& name) const;
//! Get an output with argument's name described in `op_proto` //! Get an output with argument's name described in `op_proto`
const std::string& Output(const std::string& name) const; const std::string& Output(const std::string& name) const;
//! Get an output which has multiple variables. //! Get an output which has multiple variables.
//! TODO add a vector_view to prevent memory copy. //! TODO add a vector_view to prevent memory copy.
std::vector<std::string> Outputs(const std::string& name) const; const std::vector<std::string>& Outputs(const std::string& name) const;
virtual std::vector<std::string> OutputVars(bool has_intermediate) const;
const std::string Type() const { return type_; } std::string Type() const { return type_; }
const std::vector<std::string> Inputs() const { return inputs_; }
const std::vector<std::string> Outputs() const { return outputs_; }
const AttributeMap& Attrs() const { return attrs_; } const AttributeMap& Attrs() const { return attrs_; }
const std::unordered_map<std::string, int>* InOutIdx() const {
return in_out_idxs_.get();
}
public: public:
std::string type_; std::string type_;
...@@ -140,19 +119,17 @@ class OperatorBase { ...@@ -140,19 +119,17 @@ class OperatorBase {
// I (Inputs) // I (Inputs)
// O (Outputs) // O (Outputs)
// OG (Output Gradients) // OG (Output Gradients)
std::vector<std::string> inputs_; VarNameMap inputs_;
// NOTE: in case of OpGrad, outputs_ contains // NOTE: in case of OpGrad, outputs_ contains
// IG (Inputs Gradients) // IG (Inputs Gradients)
std::vector<std::string> outputs_; VarNameMap outputs_;
AttributeMap attrs_; AttributeMap attrs_;
// store the arguments' offset described in op_desc.
std::shared_ptr<std::unordered_map<std::string, int>> in_out_idxs_;
}; };
class NOP : public OperatorBase { class NOP : public OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(NOP, OperatorBase) using OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {} const platform::DeviceContext& dev_ctx) const override {}
...@@ -163,16 +140,12 @@ class InferShapeContext { ...@@ -163,16 +140,12 @@ class InferShapeContext {
InferShapeContext(const OperatorBase& op, const Scope& scope) InferShapeContext(const OperatorBase& op, const Scope& scope)
: op_(op), scope_(scope) {} : op_(op), scope_(scope) {}
size_t InputSize() const { return op_.inputs_.size(); } size_t InputSize(const std::string& name) const {
return op_.Inputs(name).size();
size_t OutputSize() const { return op_.outputs_.size(); }
const Variable* InputVar(const size_t index) const {
return scope_.FindVar(op_.inputs_.at(index));
} }
Variable* OutputVar(const size_t index) const { size_t OutputSize(const std::string& name) const {
return scope_.FindVar(op_.outputs_.at(index)); return op_.Outputs(name).size();
} }
const Variable* InputVar(const std::string& name) const { const Variable* InputVar(const std::string& name) const {
...@@ -204,27 +177,9 @@ class InferShapeContext { ...@@ -204,27 +177,9 @@ class InferShapeContext {
return res; return res;
} }
template <typename T>
const T* Input(const size_t index) const {
auto var = InputVar(index);
PADDLE_ENFORCE_NOT_NULL(var, "Input(%d) should not be nullptr", index);
return &var->Get<T>();
}
template <typename T>
T* Output(const size_t index) const {
auto var = OutputVar(index);
PADDLE_ENFORCE_NOT_NULL(
var,
"Output(%d) not be nullptr, which means variable [%s] does not "
"exist in scope",
index, op_.outputs_[index]);
return var->GetMutable<T>();
}
template <typename T> template <typename T>
const T* Input(const std::string& name) const { const T* Input(const std::string& name) const {
auto var = InputVar(name); auto* var = InputVar(name);
PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name); PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name);
return &var->Get<T>(); return &var->Get<T>();
} }
...@@ -300,6 +255,10 @@ class ExecutionContext : public InferShapeContext { ...@@ -300,6 +255,10 @@ class ExecutionContext : public InferShapeContext {
platform::Place GetPlace() const { return device_context_->GetPlace(); } platform::Place GetPlace() const { return device_context_->GetPlace(); }
const platform::DeviceContext* device_context() const {
return device_context_;
}
const platform::DeviceContext* device_context_; const platform::DeviceContext* device_context_;
}; };
...@@ -319,14 +278,6 @@ class OpKernel { ...@@ -319,14 +278,6 @@ class OpKernel {
class OperatorWithKernel : public OperatorBase { class OperatorWithKernel : public OperatorBase {
public: public:
OperatorWithKernel() {} // TODO(yi): This constructor is to be removed.
OperatorWithKernel(const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
const AttributeMap& attrs,
std::unordered_map<std::string, int>* in_out_idxs)
: OperatorBase(type, inputs, outputs, attrs, in_out_idxs) {}
struct OpKernelKey { struct OpKernelKey {
platform::Place place_; platform::Place place_;
...@@ -350,6 +301,10 @@ class OperatorWithKernel : public OperatorBase { ...@@ -350,6 +301,10 @@ class OperatorWithKernel : public OperatorBase {
using OpKernelMap = using OpKernelMap =
std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>; std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>;
OperatorWithKernel(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void InferShape(const Scope& scope) const override { void InferShape(const Scope& scope) const override {
InferShape(InferShapeContext(*this, scope)); InferShape(InferShapeContext(*this, scope));
} }
......
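For orientation, here is a minimal sketch (mine, not part of this change) of what a simple operator looks like against the refactored interface above: the constructor now takes name-keyed VarNameMap inputs/outputs and is inherited via "using OperatorWithKernel::OperatorWithKernel;", and InferShapeContext resolves variables by parameter name instead of positional index. The operator name and the parameter names "X"/"Out" below are illustrative only.

#include "paddle/framework/op_registry.h"
#include "paddle/framework/tensor.h"

namespace paddle {
namespace operators {

// Hypothetical operator, shown only to illustrate the refactored API surface.
class ShapeLikeOp : public framework::OperatorWithKernel {
 public:
  // Inherit the (type, VarNameMap inputs, VarNameMap outputs, attrs) ctor.
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    // Variables are addressed by the names declared in the proto maker,
    // not by integer offsets into inputs_/outputs_.
    ctx.Output<framework::Tensor>("Out")->Resize(
        ctx.Input<framework::Tensor>("X")->dims());
  }
};

}  // namespace operators
}  // namespace paddle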
...@@ -23,22 +23,22 @@ static int op_run_num = 0; ...@@ -23,22 +23,22 @@ static int op_run_num = 0;
class OpWithoutKernelTest : public OperatorBase { class OpWithoutKernelTest : public OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(OpWithoutKernelTest, OperatorBase) OpWithoutKernelTest(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const AttributeMap& attrs)
void Init() override { x = 1; } : OperatorBase(type, inputs, outputs, attrs), x(1) {}
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override { const platform::DeviceContext& dev_ctx) const override {
op_run_num++; ++op_run_num;
ASSERT_EQ((int)inputs_.size(), 1); ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
ASSERT_EQ((int)outputs_.size(), 1); ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
ASSERT_EQ(x, 1); ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
} }
public: public:
float x = 0; int x{0};
}; };
class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
...@@ -56,6 +56,15 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -56,6 +56,15 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
static void BuildVar(const std::string& param_name,
std::initializer_list<const char*> arguments,
paddle::framework::OpDesc::Var* var) {
var->set_parameter(param_name);
for (auto& arg_name : arguments) {
*var->mutable_arguments()->Add() = arg_name;
}
}
REGISTER_OP_WITHOUT_GRADIENT( REGISTER_OP_WITHOUT_GRADIENT(
test_operator, paddle::framework::OpWithoutKernelTest, test_operator, paddle::framework::OpWithoutKernelTest,
paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker); paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
...@@ -63,8 +72,9 @@ REGISTER_OP_WITHOUT_GRADIENT( ...@@ -63,8 +72,9 @@ REGISTER_OP_WITHOUT_GRADIENT(
TEST(OperatorBase, all) { TEST(OperatorBase, all) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("test_operator"); op_desc.set_type("test_operator");
*op_desc.mutable_inputs()->Add() = "IN1"; BuildVar("input", {"IN1"}, op_desc.add_inputs());
*op_desc.mutable_outputs()->Add() = "OUT1"; BuildVar("output", {"OUT1"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
...@@ -101,7 +111,8 @@ static int cpu_kernel_run_num = 0; ...@@ -101,7 +111,8 @@ static int cpu_kernel_run_num = 0;
class OpWithKernelTest : public OperatorWithKernel { class OpWithKernelTest : public OperatorWithKernel {
public: public:
DEFINE_OPERATOR_CTOR(OpWithKernelTest, OperatorWithKernel) using OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& ctx) const override {} void InferShape(const framework::InferShapeContext& ctx) const override {}
}; };
...@@ -118,35 +129,15 @@ class CPUKernelTest : public OpKernel { ...@@ -118,35 +129,15 @@ class CPUKernelTest : public OpKernel {
} }
}; };
// multiple inputs test
class OperatorMultiInputsTest : public OperatorBase {
public:
DEFINE_OPERATOR_CTOR(OperatorMultiInputsTest, OperatorBase)
void Init() override { x = 1; }
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
ASSERT_EQ(Input("x"), "IN1");
ASSERT_EQ(Input("y"), "OUT1");
}
public:
float x = 0;
};
class OpKernelTestMultiInputsProtoAndCheckerMaker class OpKernelTestMultiInputsProtoAndCheckerMaker
: public OpProtoAndCheckerMaker { : public OpProtoAndCheckerMaker {
public: public:
OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto,
OpAttrChecker* op_checker) OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("xs", "inputs of test op").SetMultiple(); AddInput("xs", "inputs of test op").AsDuplicable();
AddInput("k", "input of test op"); AddInput("k", "input of test op");
AddOutput("ys", "outputs of test op").SetMultiple(); AddOutput("ys", "outputs of test op").AsDuplicable();
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .LargerThan(0.0);
...@@ -204,8 +195,9 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel, ...@@ -204,8 +195,9 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
TEST(OpKernel, all) { TEST(OpKernel, all) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("op_with_kernel"); op_desc.set_type("op_with_kernel");
*op_desc.mutable_inputs()->Add() = "IN1"; BuildVar("x", {"IN1"}, op_desc.add_inputs());
*op_desc.mutable_outputs()->Add() = "OUT1"; BuildVar("y", {"OUT1"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
...@@ -232,32 +224,15 @@ TEST(OpKernel, multi_inputs) { ...@@ -232,32 +224,15 @@ TEST(OpKernel, multi_inputs) {
OpDesc op_desc; OpDesc op_desc;
op_desc.set_type("op_multi_inputs_with_kernel"); op_desc.set_type("op_multi_inputs_with_kernel");
*op_desc.mutable_inputs()->Add() = "x0"; BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs());
*op_desc.mutable_inputs()->Add() = "x1"; BuildVar("k", {"k0"}, op_desc.add_inputs());
*op_desc.mutable_inputs()->Add() = "x2"; BuildVar("ys", {"y0", "y1"}, op_desc.add_outputs());
*op_desc.mutable_inputs()->Add() = "k0";
*op_desc.mutable_outputs()->Add() = "y0";
*op_desc.mutable_outputs()->Add() = "y1";
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
attr->set_f(3.14); attr->set_f(3.14);
auto attr0 = op_desc.mutable_attrs()->Add();
attr0->set_name("input_format");
attr0->set_type(paddle::framework::AttrType::INTS);
auto input_format = attr0->mutable_ints();
input_format->Add(0); // x0
input_format->Add(3); // k
input_format->Add(4); // end
auto attr1 = op_desc.mutable_attrs()->Add();
attr1->set_name("output_format");
attr1->set_type(paddle::framework::AttrType::INTS);
auto output_format = attr1->mutable_ints();
output_format->Add(0); // y0
output_format->Add(2); // y1
paddle::platform::CPUDeviceContext cpu_device_context; paddle::platform::CPUDeviceContext cpu_device_context;
paddle::framework::Scope scope; paddle::framework::Scope scope;
scope.NewVar("x0")->GetMutable<Tensor>(); scope.NewVar("x0")->GetMutable<Tensor>();
......
...@@ -56,30 +56,18 @@ void ExposeOperator(ClassType &m) { ...@@ -56,30 +56,18 @@ void ExposeOperator(ClassType &m) {
return op.type_; return op.type_;
}) })
.def("outputs", .def("outputs",
[](const typename ClassType::type &op) -> std::vector<std::string> { [](const typename ClassType::type &op)
-> std::map<std::string, std::vector<std::string>> {
return op.outputs_; return op.outputs_;
}) })
.def("inputs", .def("inputs",
[](const typename ClassType::type &op) -> std::vector<std::string> { [](const typename ClassType::type &op) { return op.inputs_; })
return op.inputs_; .def("__str__", &ClassType::type::DebugString)
.def("no_intermediate_outputs",
[](const typename ClassType::type &op) {
return op.OutputVars(false);
}) })
.def("support_gpu", &ClassType::type::SupportGPU) .def("support_gpu", &ClassType::type::SupportGPU);
.def("temp_outputs",
[](const typename ClassType::type &op) -> std::vector<std::string> {
auto iter = op.attrs_.find("temporary_index");
std::vector<std::string> ret;
if (iter == op.attrs_.end()) {
return ret;
} else {
auto tmp_idx = boost::get<std::vector<int>>(iter->second);
ret.reserve(tmp_idx.size());
for (auto &index : tmp_idx) {
ret.push_back(op.outputs_.at(index));
}
return ret;
}
})
.def("__str__", &ClassType::type::DebugString);
} }
static size_t UniqueIntegerGenerator() { static size_t UniqueIntegerGenerator() {
......
...@@ -105,6 +105,8 @@ class Tensor { ...@@ -105,6 +105,8 @@ class Tensor {
template <typename T> template <typename T>
inline Tensor Slice(const int& begin_idx, const int& end_idx) const; inline Tensor Slice(const int& begin_idx, const int& end_idx) const;
platform::Place place() const { return holder_->place(); }
private: private:
template <typename T> template <typename T>
inline void check_memory_size() const; inline void check_memory_size() const;
......
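The new place() accessor above lets callers verify where a tensor's memory lives before dispatching to device-specific code. A small hedged sketch (the helper name is mine) of the kind of check the math routines later in this change perform:

#include "paddle/framework/tensor.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"

// Hypothetical helper: refuse tensors that do not reside in CPU memory,
// mirroring the placement checks inside math::matmul<CPUPlace, T>.
inline void AssertCpuTensor(const paddle::framework::Tensor& t) {
  PADDLE_ENFORCE(paddle::platform::is_cpu_place(t.place()),
                 "Tensor must be placed in CPUPlace");
}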
...@@ -388,14 +388,23 @@ void initDataLayer(TestConfig testConf, ...@@ -388,14 +388,23 @@ void initDataLayer(TestConfig testConf,
data.grad->zeroMem(); data.grad->zeroMem();
break; break;
case INPUT_SELF_DEFINE_DATA: { case INPUT_SELF_DEFINE_DATA: {
size_t height = testConf.inputDefs[i].selfDefinedData->getHeight(); if (testConf.inputDefs[i].ids.size()) {
size_t width = testConf.inputDefs[i].selfDefinedData->getWidth(); data.ids = IVector::create(testConf.inputDefs[i].ids.size(), useGpu);
CHECK_GT(static_cast<int>(height), 0); data.ids->copyFrom(testConf.inputDefs[i].ids.data(),
CHECK_GT(static_cast<int>(width), 0); testConf.inputDefs[i].ids.size());
data.value = Matrix::create(height, width, false, useGpu); } else if (testConf.inputDefs[i].selfDefinedData) {
data.grad = Matrix::create(height, width, false, useGpu); size_t height = testConf.inputDefs[i].selfDefinedData->getHeight();
data.value->copyFrom(*testConf.inputDefs[i].selfDefinedData); size_t width = testConf.inputDefs[i].selfDefinedData->getWidth();
data.grad->zeroMem(); CHECK_GT(static_cast<int>(height), 0);
CHECK_GT(static_cast<int>(width), 0);
data.value = Matrix::create(height, width, false, useGpu);
data.grad = Matrix::create(height, width, false, useGpu);
data.value->copyFrom(*testConf.inputDefs[i].selfDefinedData);
data.grad->zeroMem();
} else {
LOG(FATAL) << "No self-defined data are given.";
return;
}
const std::vector<int>& labelSeqStartPositions = const std::vector<int>& labelSeqStartPositions =
testConf.inputDefs[i].labelSeqStartPositions; testConf.inputDefs[i].labelSeqStartPositions;
......
...@@ -68,6 +68,7 @@ struct InputDef { ...@@ -68,6 +68,7 @@ struct InputDef {
std::vector<int> labelInitValue; std::vector<int> labelInitValue;
std::vector<int> labelSeqStartPositions; std::vector<int> labelSeqStartPositions;
std::vector<int> labelSubSeqStartPositions; std::vector<int> labelSubSeqStartPositions;
std::vector<int> ids;
MatrixPtr selfDefinedData; MatrixPtr selfDefinedData;
InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) { InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) {
...@@ -95,6 +96,23 @@ struct InputDef { ...@@ -95,6 +96,23 @@ struct InputDef {
isStatic = false; isStatic = false;
} }
InputDef(InputType type,
string nameIn,
const std::vector<int>& ids,
const std::vector<int>& selfDefinedSeqStartPos = {},
const std::vector<int>& selfDefinedSubSeqStartPos = {})
: labelSeqStartPositions(selfDefinedSeqStartPos),
labelSubSeqStartPositions(selfDefinedSubSeqStartPos),
ids(ids) {
selfDefinedData = nullptr;
inputType = type;
name = nameIn;
dim = 0;
sparse = {""};
paraSize = 0;
isStatic = false;
}
InputDef(InputType type, InputDef(InputType type,
string nameIn, string nameIn,
size_t dimIn, size_t dimIn,
......
...@@ -41,6 +41,7 @@ function(op_library TARGET) ...@@ -41,6 +41,7 @@ function(op_library TARGET)
endif() endif()
endfunction() endfunction()
add_subdirectory(math)
cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_library(net_op SRCS net_op.cc DEPS op_registry) cc_library(net_op SRCS net_op.cc DEPS op_registry)
...@@ -50,7 +51,7 @@ op_library(add_op SRCS add_op.cc add_op.cu) ...@@ -50,7 +51,7 @@ op_library(add_op SRCS add_op.cc add_op.cu)
op_library(mean_op SRCS mean_op.cc mean_op.cu) op_library(mean_op SRCS mean_op.cc mean_op.cu)
op_library(mul_op SRCS mul_op.cc mul_op.cu) op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function)
op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc) op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc)
op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu) op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu)
...@@ -62,7 +63,7 @@ op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu) ...@@ -62,7 +63,7 @@ op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu)
op_library(sgd_op SRCS sgd_op.cc sgd_op.cu) op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS op_desc tensor op_registry operator net_op) DEPS framework_proto tensor op_registry operator net_op)
cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op) cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op)
op_library(uniform_random_op op_library(uniform_random_op
SRCS uniform_random_op.cc uniform_random_op.cu) SRCS uniform_random_op.cc uniform_random_op.cu)
...@@ -18,17 +18,15 @@ namespace paddle { ...@@ -18,17 +18,15 @@ namespace paddle {
namespace operators { namespace operators {
class AddOp : public framework::OperatorWithKernel { class AddOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(AddOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2); PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1); ctx.Input<Tensor>("Y")->dims(),
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "Inputs of AddOp must all be set"); "Two input of Add Op's dimension must be same.");
PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
"Outputs of AddOp must all be set");
PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
"Two input of Add Op's dimension must be same.");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
} }
}; };
...@@ -48,7 +46,9 @@ The equation is: Out = X + Y ...@@ -48,7 +46,9 @@ The equation is: Out = X + Y
}; };
class AddOpGrad : public framework::OperatorWithKernel { class AddOpGrad : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(AddOpGrad, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override {} void InferShape(const framework::InferShapeContext &ctx) const override {}
}; };
......
...@@ -28,9 +28,9 @@ template <typename Place, typename T> ...@@ -28,9 +28,9 @@ template <typename Place, typename T>
class AddKernel : public framework::OpKernel { class AddKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input0 = context.Input<Tensor>(0); auto* input0 = context.Input<Tensor>("X");
auto input1 = context.Input<Tensor>(1); auto* input1 = context.Input<Tensor>("Y");
auto output = context.Output<Tensor>(0); auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
......
...@@ -18,29 +18,25 @@ namespace paddle { ...@@ -18,29 +18,25 @@ namespace paddle {
namespace operators { namespace operators {
class OnehotCrossEntropyOp : public framework::OperatorWithKernel { class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(OnehotCrossEntropyOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, auto *X = ctx.Input<Tensor>("X");
"Input size of OnehotCrossEntropyOp must be two"); auto *label = ctx.Input<Tensor>("label");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1,
"Output size of OnehotCrossEntropyOp must be one"); PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
"0-th input of OnehotCrossEntropyOp should be set"); PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
"1-th input of OnehotCrossEntropyOp should be set");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0),
"Outputs of OnehotCrossEntropyOp must all be set");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>(0)->dims().size(), 2);
PADDLE_ENFORCE_EQ(ctx.Output<Tensor>(0)->dims().size(), 1,
"label's dimension must be 1.");
ctx.Output<Tensor>(0)->Resize({ctx.Input<Tensor>(0)->dims()[0]});
} }
}; };
class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel { class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(OnehotCrossEntropyGradientOp, public:
framework::OperatorWithKernel) using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
auto X_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto X_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
......
...@@ -45,7 +45,7 @@ class OnehotCrossEntropyOpKernel : public framework::OpKernel { ...@@ -45,7 +45,7 @@ class OnehotCrossEntropyOpKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto X = ctx.Input<Tensor>("X"); auto X = ctx.Input<Tensor>("X");
const T* Xdata = X->data<T>(); const T* Xdata = X->data<T>();
const int* label_data = ctx.Input<Tensor>(1)->data<int>(); const int* label_data = ctx.Input<Tensor>("label")->data<int>();
auto Y = ctx.Output<Tensor>("Y"); auto Y = ctx.Output<Tensor>("Y");
Y->mutable_data<T>(ctx.GetPlace()); Y->mutable_data<T>(ctx.GetPlace());
......
...@@ -18,19 +18,13 @@ namespace paddle { ...@@ -18,19 +18,13 @@ namespace paddle {
namespace operators { namespace operators {
class FillZerosLikeOp : public framework::OperatorWithKernel { class FillZerosLikeOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(FillZerosLikeOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, ctx.Output<framework::Tensor>("Dst")->Resize(
"Input size of FillZerosLikeOp must be one."); ctx.Input<framework::Tensor>("Src")->dims());
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Output size of AddOp must be one.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0),
"Input of FillZerosLikeOp must be set.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0),
"Output of FillZerosLikeOp must be set.");
ctx.Output<framework::Tensor>(0)->Resize(
ctx.Input<framework::Tensor>(0)->dims());
} }
}; };
......
...@@ -23,7 +23,7 @@ template <typename Place, typename T> ...@@ -23,7 +23,7 @@ template <typename Place, typename T>
class FillZerosLikeKernel : public framework::OpKernel { class FillZerosLikeKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* output = context.Output<framework::Tensor>(0); auto* output = context.Output<framework::Tensor>("Dst");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*output); auto t = framework::EigenVector<T>::Flatten(*output);
t.device(context.GetEigenDevice<Place>()) = t.constant(T(0)); t.device(context.GetEigenDevice<Place>()) = t.constant(T(0));
......
...@@ -43,7 +43,9 @@ class GaussianRandomKernel : public framework::OpKernel { ...@@ -43,7 +43,9 @@ class GaussianRandomKernel : public framework::OpKernel {
}; };
class GaussianRandomOp : public framework::OperatorWithKernel { class GaussianRandomOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(GaussianRandomOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& context) const override { void InferShape(const framework::InferShapeContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>(0);
......
if(WITH_MKLML)
set(BLAS_LIB mklml)
else()
set(BLAS_LIB cblas)
endif()
if(WITH_GPU)
nv_library(math_function SRCS math_function.cc math_function.cu DEPS ${BLAS_LIB} device_context)
else()
cc_library(math_function SRCS math_function.cc DEPS ${BLAS_LIB} device_context)
endif()
nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
int lda = K;
int ldb = N;
int ldc = N;
cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
int lda = K;
int ldb = N;
int ldc = N;
cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
void matmul<platform::CPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
platform::is_cpu_place(matrix_b.place()) &&
platform::is_cpu_place(matrix_out->place()),
"Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
}
template <>
void matmul<platform::CPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
platform::is_cpu_place(matrix_b.place()) &&
platform::is_cpu_place(matrix_out->place()),
"Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
}
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
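A short justification of the operand swap in the call above (the derivation and notation are mine, not from the source). cuBLAS expects column-major (Fortran-order) storage while the callers here hold row-major buffers, and

  C = AB \iff C^{\top} = B^{\top}A^{\top}

A row-major M-by-N buffer reinterpreted as column-major data is exactly the transpose of the matrix it stores. So requesting B^T * A^T from cuBLAS, with the roles of M and N exchanged and B/ldb passed before A/lda, writes C^T in column-major form, which is bit-for-bit the row-major C the caller expects; this is why the argument order differs from the cblas_sgemm call in the CPU implementation.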
template <>
void gemm<platform::GPUPlace, double>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void matmul<platform::GPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
}
template <>
void matmul<platform::GPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
}
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef PADDLE_USE_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#endif
#ifdef PADDLE_USE_MKL
#include <mkl.h>
#include <mkl_lapacke.h>
#endif
#ifdef PADDLE_USE_ATLAS
extern "C" {
#include <cblas.h>
#include <clapack.h>
}
#endif
#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#include <lapacke.h>
#endif
#ifndef LAPACK_FOUND
extern "C" {
#include <cblas.h>
int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda,
int* ipiv);
int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda,
int* ipiv);
int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda,
const int* ipiv);
int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda,
const int* ipiv);
}
#endif
#include <cmath>
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/enforce.h"
namespace paddle {
namespace operators {
namespace math {
// Only contiguous (continuously stored) memory is supported for now.
// If transA = N and transB = N, then
//   matrixA: M * K, matrixB: K * N, matrixC: M * N
// For more detailed info, please refer to
// http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html
template <typename Place, typename T>
void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const T alpha, const T* A,
const T* B, const T beta, T* C, platform::DeviceContext* context);
// matrix multiply with continuous memory
template <typename Place, typename T>
void matmul(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, T alpha,
framework::Tensor* matrix_out, T beta,
platform::DeviceContext* context);
} // namespace math
} // namespace operators
} // namespace paddle
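To make the declarations above concrete, here is a minimal CPU-side usage sketch mirroring the GPU unit test below but without transposition; only calls visible in this change are used (Tensor::mutable_data/data, CPUDeviceContext, math::matmul), and the sizes and values are illustrative.

#include <cstring>

#include "paddle/operators/math/math_function.h"

void MatmulCpuSketch() {
  namespace f = paddle::framework;
  namespace plat = paddle::platform;

  plat::CPUPlace cpu_place;
  plat::CPUDeviceContext cpu_ctx;

  f::Tensor a, b, out;
  float a_data[6] = {0, 1, 2, 3, 4, 5};  // 2 x 3, row-major
  float b_data[6] = {0, 1, 2, 3, 4, 5};  // 3 x 2, row-major
  std::memcpy(a.mutable_data<float>({2, 3}, cpu_place), a_data, sizeof(a_data));
  std::memcpy(b.mutable_data<float>({3, 2}, cpu_place), b_data, sizeof(b_data));
  out.mutable_data<float>({2, 2}, cpu_place);

  // out = 1 * (a . b) + 0 * out  ->  [[10, 13], [28, 40]]
  paddle::operators::math::matmul<plat::CPUPlace, float>(
      a, false, b, false, 1.0f, &out, 0.0f, &cpu_ctx);
}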
#include "paddle/operators/math/math_function.h"
#include "gtest/gtest.h"
#ifndef PADDLE_ONLY_CPU
TEST(math_function, notrans_mul_trans) {
paddle::framework::Tensor input1;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
auto* cpu_place = new paddle::platform::CPUPlace();
float* input1_ptr = input1.mutable_data<float>({2, 3}, *cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
out_gpu.mutable_data<float>({2, 2}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0, context);
out.CopyFrom<float>(out_gpu, *cpu_place);
float* out_ptr = out.data<float>();
EXPECT_EQ(out_ptr[0], 5);
EXPECT_EQ(out_ptr[1], 14);
EXPECT_EQ(out_ptr[2], 14);
EXPECT_EQ(out_ptr[3], 50);
}
TEST(math_function, trans_mul_notrans) {
paddle::framework::Tensor input1;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
auto* cpu_place = new paddle::platform::CPUPlace();
float* input1_ptr = input1.mutable_data<float>({2, 3}, *cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
out_gpu.mutable_data<float>({3, 3}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0, context);
out.CopyFrom<float>(out_gpu, *cpu_place);
float* out_ptr = out.data<float>();
EXPECT_EQ(out_ptr[0], 9);
EXPECT_EQ(out_ptr[1], 12);
EXPECT_EQ(out_ptr[2], 15);
EXPECT_EQ(out_ptr[3], 12);
EXPECT_EQ(out_ptr[4], 17);
EXPECT_EQ(out_ptr[5], 22);
EXPECT_EQ(out_ptr[6], 15);
EXPECT_EQ(out_ptr[7], 22);
EXPECT_EQ(out_ptr[8], 29);
}
#endif
...@@ -18,14 +18,14 @@ namespace paddle { ...@@ -18,14 +18,14 @@ namespace paddle {
namespace operators { namespace operators {
class MeanOp : public framework::OperatorWithKernel { class MeanOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(MeanOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1, "Input size of AddOp must be one"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of AddOp must be one"); "Input of MeanOp must be initialized.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "input should be set"); ctx.Output<Tensor>("Out")->Resize({1});
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "output should be set");
ctx.Output<Tensor>(0)->Resize(framework::make_ddim({1}));
} }
}; };
...@@ -34,13 +34,15 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -34,13 +34,15 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of mean op"); AddInput("X", "The input of mean op");
AddOutput("Out", "The output of mean op").IgnoreGradient(); AddOutput("Out", "The output of mean op").AsNoGradient();
AddComment("Mean Operator"); AddComment("Mean Operator");
} }
}; };
class MeanGradOp : public framework::OperatorWithKernel { class MeanGradOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(MeanGradOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<Tensor>(framework::GradVarName("X")) ctx.Output<Tensor>(framework::GradVarName("X"))
......
...@@ -31,14 +31,14 @@ template <typename Place, typename T> ...@@ -31,14 +31,14 @@ template <typename Place, typename T>
class MeanKernel : public framework::OpKernel { class MeanKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input = context.Input<Tensor>(0); auto* input = context.Input<Tensor>("X");
auto output = context.Output<Tensor>(0); auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto X = EigenVector<T>::Flatten(*input); auto X = EigenVector<T>::Flatten(*input);
auto y = EigenScalar<T>::From(*output); auto y = EigenScalar<T>::From(*output);
auto place = context.GetEigenDevice<Place>(); auto& place = context.GetEigenDevice<Place>();
y.device(place) = X.mean(); y.device(place) = X.mean();
} }
......
...@@ -13,17 +13,19 @@ ...@@ -13,17 +13,19 @@
limitations under the License. */ limitations under the License. */
#include "paddle/operators/mul_op.h" #include "paddle/operators/mul_op.h"
#include "paddle/operators/math/math_function.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class MulOp : public framework::OperatorWithKernel { class MulOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(MulOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs"); auto dim0 = ctx.Input<Tensor>("X")->dims();
auto dim0 = ctx.Input<Tensor>(0)->dims(); auto dim1 = ctx.Input<Tensor>("Y")->dims();
auto dim1 = ctx.Input<Tensor>(1)->dims();
PADDLE_ENFORCE_EQ(dim0.size(), 2, PADDLE_ENFORCE_EQ(dim0.size(), 2,
"input X(%s) should be a tensor with 2 dims, a matrix", "input X(%s) should be a tensor with 2 dims, a matrix",
ctx.op_.Input("X")); ctx.op_.Input("X"));
...@@ -33,8 +35,7 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -33,8 +35,7 @@ class MulOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
dim0[1], dim1[0], dim0[1], dim1[0],
"First matrix's width must be equal with second matrix's height."); "First matrix's width must be equal with second matrix's height.");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output"); ctx.Output<Tensor>("Out")->Resize({dim0[0], dim1[1]});
ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
} }
}; };
...@@ -54,7 +55,9 @@ The equation is: Out = X * Y ...@@ -54,7 +55,9 @@ The equation is: Out = X * Y
}; };
class MulOpGrad : public framework::OperatorWithKernel { class MulOpGrad : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(MulOpGrad, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override {} void InferShape(const framework::InferShapeContext &ctx) const override {}
std::string DebugString() const override { std::string DebugString() const override {
......
...@@ -16,5 +16,4 @@ ...@@ -16,5 +16,4 @@
#include "paddle/operators/mul_op.h" #include "paddle/operators/mul_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>); REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>);
...@@ -13,6 +13,9 @@ ...@@ -13,6 +13,9 @@
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include "paddle/operators/math/math_function.h"
#include "paddle/framework/eigen.h" #include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
...@@ -30,17 +33,14 @@ class MulKernel : public framework::OpKernel { ...@@ -30,17 +33,14 @@ class MulKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = { Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = {
{Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}}; {Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}};
auto* input0 = context.Input<Tensor>("X");
auto input0 = context.Input<Tensor>("X"); auto* input1 = context.Input<Tensor>("Y");
auto input1 = context.Input<Tensor>("Y"); auto* output = context.Output<Tensor>("Out");
auto output = context.Output<Tensor>(0);
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto X = EigenMatrix<T>::From(*input0); auto X = EigenMatrix<T>::From(*input0);
auto Y = EigenMatrix<T>::From(*input1); auto Y = EigenMatrix<T>::From(*input1);
auto Z = EigenMatrix<T>::From(*output); auto Z = EigenMatrix<T>::From(*output);
auto place = context.GetEigenDevice<Place>(); auto& place = context.GetEigenDevice<Place>();
Z.device(place) = X.contract(Y, dim_pair); Z.device(place) = X.contract(Y, dim_pair);
} }
......
...@@ -15,48 +15,42 @@ ...@@ -15,48 +15,42 @@
*/ */
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include <set>
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
const char NetOp::kAll[] = "all";
void NetOp::CompleteAddOp(bool calc) { void NetOp::CompleteAddOp(bool calc) {
add_op_done_ = true; add_op_done_ = true;
if (!calc) return; if (!calc) return;
std::unordered_set<std::string> input_set; std::set<std::string> input_set;
std::unordered_set<std::string> output_set; std::set<std::string> output_set;
std::unordered_set<std::string> temp_output;
for (auto& op : ops_) { for (auto& op : ops_) {
for (auto& ipt : op->inputs_) { for (auto& ipt : op->inputs_) {
if (!Contains(output_set, ipt)) { // Not other op's output for (auto& var_name : ipt.second) {
input_set.insert(ipt); if (!Contains(output_set, var_name)) { // Not other op's output
} else { input_set.insert(var_name);
temp_output.insert(ipt); } else {
intermediate_outputs_.insert(var_name);
}
} }
} }
for (auto& opt : op->outputs_) { for (auto& opt : op->outputs_) {
output_set.insert(opt); for (auto& var_name : opt.second) {
} output_set.insert(var_name);
} }
inputs_.reserve(input_set.size());
std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs_));
std::sort(inputs_.begin(), inputs_.end());
outputs_.reserve(output_set.size());
std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs_));
std::sort(outputs_.begin(), outputs_.end());
std::vector<int> tmp_index;
tmp_index.reserve(temp_output.size());
int output_len = static_cast<int>(outputs_.size());
for (int i = 0; i < output_len; ++i) {
if (Contains(temp_output, outputs_[i])) {
tmp_index.push_back(i);
} }
} }
auto& inputs = inputs_[kAll];
attrs_["temporary_index"] = tmp_index; inputs.reserve(input_set.size());
std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs));
auto& outputs = outputs_[kAll];
outputs.reserve(output_set.size());
std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs));
} }
std::string NetOp::DebugString() const { std::string NetOp::DebugString() const {
...@@ -73,5 +67,25 @@ std::string NetOp::DebugString() const { ...@@ -73,5 +67,25 @@ std::string NetOp::DebugString() const {
bool NetOp::IsNetOp() const { return true; } bool NetOp::IsNetOp() const { return true; }
std::vector<std::string> NetOp::OutputVars(bool has_intermediate) const {
if (has_intermediate) {
return this->outputs_.at(kAll);
}
auto& all = this->outputs_.at(kAll);
std::vector<std::string> ret_val;
for (auto& each : all) {
if (!Contains(intermediate_outputs_, each)) {
ret_val.push_back(each);
}
}
return ret_val;
}
NetOp::NetOp(const std::string& type,
const framework::OperatorBase::VarNameMap& inputs,
const framework::OperatorBase::VarNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
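A compact hedged sketch of the reworked bookkeeping above (DemoOp, the op type string, and the variable names are hypothetical): after CompleteAddOp, all external inputs and all outputs are gathered under the NetOp::kAll key, intermediate outputs are tracked separately, and OutputVars(false) filters them out.

#include <memory>

#include "paddle/operators/net_op.h"

namespace f = paddle::framework;

// Hypothetical no-op operator, analogous to EmptyOp in the unit test below.
class DemoOp : public f::OperatorBase {
 public:
  using f::OperatorBase::OperatorBase;
  void InferShape(const f::Scope& scope) const override {}
  void Run(const f::Scope& scope,
           const paddle::platform::DeviceContext& dev_ctx) const override {}
};

void NetOpSketch() {
  paddle::operators::NetOp net;
  net.AddOp(std::shared_ptr<DemoOp>(
      new DemoOp("demo", {{"X", {"x"}}}, {{"Out", {"h"}}}, {})));
  net.AddOp(std::shared_ptr<DemoOp>(
      new DemoOp("demo", {{"X", {"h"}}}, {{"Out", {"y"}}}, {})));
  net.CompleteAddOp();

  // External inputs under kAll: {"x"} ("h" is produced inside the net).
  auto& all_inputs = net.inputs_.at(paddle::operators::NetOp::kAll);
  // Non-intermediate outputs: {"y"}; "h" is filtered out as intermediate.
  auto final_outputs = net.OutputVars(false);
  (void)all_inputs;
  (void)final_outputs;
}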
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
...@@ -35,7 +36,10 @@ namespace operators { ...@@ -35,7 +36,10 @@ namespace operators {
*/ */
class NetOp : public framework::OperatorBase { class NetOp : public framework::OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(NetOp, framework::OperatorBase) static const char kAll[];
NetOp() : framework::OperatorBase("plain_net", {}, {}, {}) {}
NetOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const framework::AttributeMap& attrs);
/** /**
* Infer all the operators' input and output variables' shapes, will be called * Infer all the operators' input and output variables' shapes, will be called
...@@ -92,11 +96,13 @@ class NetOp : public framework::OperatorBase { ...@@ -92,11 +96,13 @@ class NetOp : public framework::OperatorBase {
std::string DebugString() const override; std::string DebugString() const override;
bool IsNetOp() const override; bool IsNetOp() const override;
std::vector<std::string> OutputVars(bool has_intermediate) const override;
std::vector<std::shared_ptr<OperatorBase>> ops_; std::vector<std::shared_ptr<OperatorBase>> ops_;
private: private:
bool add_op_done_{false}; bool add_op_done_{false};
std::set<std::string> intermediate_outputs_;
template <typename T, typename KeyType> template <typename T, typename KeyType>
static bool Contains(T container, KeyType key) { static bool Contains(T container, KeyType key) {
......
...@@ -12,8 +12,7 @@ static int run_cnt = 0; ...@@ -12,8 +12,7 @@ static int run_cnt = 0;
class TestOp : public framework::OperatorBase { class TestOp : public framework::OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(TestOp, framework::OperatorBase) using framework::OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override { ++infer_shape_cnt; } void InferShape(const Scope& scope) const override { ++infer_shape_cnt; }
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override { const platform::DeviceContext& dev_ctx) const override {
...@@ -23,8 +22,7 @@ class TestOp : public framework::OperatorBase { ...@@ -23,8 +22,7 @@ class TestOp : public framework::OperatorBase {
class EmptyOp : public framework::OperatorBase { class EmptyOp : public framework::OperatorBase {
public: public:
DEFINE_OPERATOR_CTOR(EmptyOp, framework::OperatorBase) using framework::OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {} void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {}
}; };
...@@ -46,40 +44,32 @@ TEST(OpKernel, all) { ...@@ -46,40 +44,32 @@ TEST(OpKernel, all) {
auto net = std::make_shared<NetOp>(); auto net = std::make_shared<NetOp>();
ASSERT_NE(net, nullptr); ASSERT_NE(net, nullptr);
auto op1 = std::make_shared<TestOp>(); auto op1 = std::shared_ptr<TestOp>(
op1->inputs_ = {"x", "w1", "b1"}; new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}},
op1->outputs_ = {"y"}; {{"Out", {"y"}}}, {}));
net->AddOp(op1); net->AddOp(op1);
auto op2 = std::make_shared<TestOp>(); auto op2 = std::shared_ptr<TestOp>(
op2->inputs_ = {"y", "w2", "b2"}; new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}},
op2->outputs_ = {"z"}; {{"Out", {"z"}}}, {}));
net->AddOp(op2); net->AddOp(op2);
net->CompleteAddOp(); net->CompleteAddOp();
AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net->inputs_); AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_); net->inputs_.at(NetOp::kAll));
auto tmp_idx_iter = net->attrs_.find("temporary_index"); AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at(NetOp::kAll));
ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
ASSERT_EQ(1UL, tmp_idx.size());
ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);
Scope scope; auto final_outs = net->OutputVars(false);
platform::CPUDeviceContext dev_ctx;
net->InferShape(scope); ASSERT_EQ(final_outs.size(), 1UL);
net->Run(scope, dev_ctx); ASSERT_EQ(final_outs[0], "z");
ASSERT_EQ(2, infer_shape_cnt);
ASSERT_EQ(2, run_cnt);
ASSERT_THROW(net->AddOp(op2), platform::EnforceNotMet);
} }
TEST(NetOp, insert_op) { TEST(NetOp, insert_op) {
NetOp net; NetOp net;
auto op1 = std::make_shared<EmptyOp>(); auto op1 = std::shared_ptr<EmptyOp>(
op1->inputs_ = {"x", "w1", "b1"}; new EmptyOp("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}},
op1->outputs_ = {"y"}; {{"Out", {"y"}}}, {}));
net.AddOp(op1); net.AddOp(op1);
net.InsertOp(0, op1); net.InsertOp(0, op1);
ASSERT_EQ(2UL, net.ops_.size()); ASSERT_EQ(2UL, net.ops_.size());
......
...@@ -91,12 +91,17 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { ...@@ -91,12 +91,17 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
// create step net's temp inputs // create step net's temp inputs
for (auto& input : net_op->inputs_) { for (auto& input : net_op->inputs_) {
// the weight are located in parent scope // the weight are located in parent scope
if (!step_scope.FindVar(input)) for (auto& var_name : input.second) {
step_scope.NewVar(input)->GetMutable<Tensor>(); if (!step_scope.FindVar(var_name)) {
step_scope.NewVar(var_name)->GetMutable<Tensor>();
}
}
} }
// create stepnet's outputs // create stepnet's outputs
for (const auto& output : net_op->outputs_) { for (const auto& output : net_op->outputs_) {
step_scope.NewVar(output); for (auto& var_name : output.second) {
step_scope.NewVar(var_name);
}
} }
step_scopes->emplace_back(&step_scope); step_scopes->emplace_back(&step_scope);
} }
...@@ -130,8 +135,11 @@ const rnn::ArgumentName RecurrentGradientOp::kArgName{ ...@@ -130,8 +135,11 @@ const rnn::ArgumentName RecurrentGradientOp::kArgName{
"inlink@grad", "inlink_alias", "outlink_alias", "inlink@grad", "inlink_alias", "outlink_alias",
"memories", "pre_memories", "boot_memories@grad"}; "memories", "pre_memories", "boot_memories@grad"};
void RecurrentOp::Init() { RecurrentOp::RecurrentOp(const std::string& type,
OperatorBase::Init(); const framework::OperatorBase::VarNameMap& inputs,
const framework::OperatorBase::VarNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {
std::unique_ptr<rnn::Argument> arg(new rnn::Argument()); std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
rnn::InitArgument(kArgName, arg.get(), *this); rnn::InitArgument(kArgName, arg.get(), *this);
alg_.Init(std::move(arg)); alg_.Init(std::move(arg));
...@@ -147,13 +155,13 @@ class RecurrentAlgorithmProtoAndCheckerMaker ...@@ -147,13 +155,13 @@ class RecurrentAlgorithmProtoAndCheckerMaker
// inputs and outputs stored in proto // inputs and outputs stored in proto
AddInput(name.inlinks, AddInput(name.inlinks,
"the inputs that need to be segmented for each step.") "the inputs that need to be segmented for each step.")
.SetMultiple(); .AsDuplicable();
AddInput(name.boot_memories, "variables to initialize memories.") AddInput(name.boot_memories, "variables to initialize memories.")
.SetMultiple(); .AsDuplicable();
AddInput(name.step_net, "network shared by all steps."); AddInput(name.step_net, "network shared by all steps.");
AddOutput(name.outlinks, "the outputs that need to concated for all steps.") AddOutput(name.outlinks, "the outputs that need to concated for all steps.")
.SetMultiple(); .AsDuplicable();
AddOutput(name.step_scopes, "step scopes"); AddOutput(name.step_scopes, "step scopes");
// Attributes stored in AttributeMap // Attributes stored in AttributeMap
...@@ -225,8 +233,11 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { ...@@ -225,8 +233,11 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/); LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/);
} }
void RecurrentGradientOp::Init() { RecurrentGradientOp::RecurrentGradientOp(
OperatorBase::Init(); const std::string& type, const framework::OperatorBase::VarNameMap& inputs,
const framework::OperatorBase::VarNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {
std::unique_ptr<rnn::Argument> arg(new rnn::Argument()); std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
rnn::InitArgument(kArgName, arg.get(), *this); rnn::InitArgument(kArgName, arg.get(), *this);
alg_.Init(std::move(arg)); alg_.Init(std::move(arg));
......
...@@ -100,13 +100,12 @@ class RecurrentGradientAlgorithm { ...@@ -100,13 +100,12 @@ class RecurrentGradientAlgorithm {
}; };
class RecurrentOp final : public framework::OperatorBase { class RecurrentOp final : public framework::OperatorBase {
DEFINE_OPERATOR_CTOR(RecurrentOp, framework::OperatorBase)
public: public:
void Init() override; RecurrentOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const framework::AttributeMap& attrs);
/** /**
* InferShape must be called before Run. * InferShape must be called before Run.
*/ */
void InferShape(const framework::Scope& scope) const override { void InferShape(const framework::Scope& scope) const override {
alg_.InferShape(scope); alg_.InferShape(scope);
} }
...@@ -124,7 +123,9 @@ class RecurrentOp final : public framework::OperatorBase { ...@@ -124,7 +123,9 @@ class RecurrentOp final : public framework::OperatorBase {
class RecurrentGradientOp final : public framework::OperatorBase { class RecurrentGradientOp final : public framework::OperatorBase {
public: public:
void Init() override; RecurrentGradientOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs,
const framework::AttributeMap& attrs);
/** /**
* InferShape must be called before Run. * InferShape must be called before Run.
......
...@@ -25,157 +25,7 @@ ...@@ -25,157 +25,7 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using framework::make_ddim; using namespace paddle::framework;
using framework::DDim;
using framework::Tensor;
using framework::Variable;
using framework::Scope;
using framework::OpRegistry;
class RecurrentOpTest : public ::testing::Test {
protected:
virtual void SetUp() override {
CreateGlobalVariables();
CreateStepNet();
CreateRNNOp();
}
virtual void TearDown() override {}
void CreateGlobalVariables() {
// create input, and init content
LOG(INFO) << "create global variable x";
for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
Variable* x = scope_.NewVar(inlink);
DDim dims = make_ddim(std::vector<int>{
10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
}
// create output alias just for test
for (auto inlink : std::vector<std::string>{"h@alias"}) {
Variable* x = scope_.NewVar(inlink);
DDim dims =
make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
}
LOG(INFO) << "create global variable w";
Variable* w = scope_.NewVar("rnn/w");
w->GetMutable<Tensor>()->mutable_data<float>(
make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
for (auto boot : std::vector<std::string>{"h_boot"}) {
LOG(INFO) << "create global variable " << boot;
Variable* h_boot = scope_.NewVar(boot);
h_boot->GetMutable<Tensor>()->mutable_data<float>(
make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
platform::CPUPlace());
}
LOG(INFO) << "create variable step_scopes";
scope_.NewVar("step_scopes");
LOG(INFO) << "create variable h";
scope_.NewVar("h");
}
void CreateRNNOp() {
framework::OpDesc op_desc;
op_desc.set_type("recurrent_op");
// inlinks 0
op_desc.add_inputs("x");
op_desc.add_inputs("x0");
op_desc.add_inputs("x1");
// boot_memories 3
op_desc.add_inputs("h_boot");
// step net 5
op_desc.add_inputs("step_net");
// outlinks 6
op_desc.add_outputs("h");
// step scopes 7
op_desc.add_outputs("step_scopes");
auto _input_format = std::vector<int>{
0, // in_link
3, // memories
4 // step_net
};
auto input_format = op_desc.add_attrs();
input_format->set_name("input_format");
input_format->set_type(paddle::framework::AttrType::INTS);
for (auto i : _input_format) {
input_format->add_ints(i);
}
auto output_format = op_desc.add_attrs();
output_format->set_name("output_format");
output_format->set_type(paddle::framework::AttrType::INTS);
for (auto i : std::vector<int>{0, 1, 2}) {
output_format->add_ints(i);
}
auto inlink_alias = op_desc.add_attrs();
inlink_alias->set_name("inlink_alias");
inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
auto outlink_alias = op_desc.add_attrs();
outlink_alias->set_name("outlink_alias");
outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
auto pre_memories = op_desc.add_attrs();
pre_memories->set_name("pre_memories");
pre_memories->set_type(paddle::framework::AttrType::STRINGS);
auto memories = op_desc.add_attrs();
memories->set_name("memories");
memories->set_type(paddle::framework::AttrType::STRINGS);
// create inlink_alias
for (const auto& item :
std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
inlink_alias->add_strings(item);
}
// pre memories
for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
pre_memories->add_strings(item);
}
// memories
for (const auto& item : std::vector<std::string>{"rnn/h"}) {
memories->add_strings(item);
}
// output alias
for (const auto& item : std::vector<std::string>{"h@alias"}) {
outlink_alias->add_strings(item);
}
rnn_op_ = OpRegistry::CreateOp(op_desc);
LOG(INFO) << "rnn_op finish init";
}
void CreateStepNet() {
LOG(INFO) << "create variable step_net";
Variable* var = scope_.NewVar("step_net");
auto net = var->GetMutable<NetOp>();
net->AddOp(
OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
net->AddOp(
OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
net->CompleteAddOp();
}
// father scope
Scope scope_;
std::shared_ptr<framework::OperatorBase> rnn_op_;
};
TEST_F(RecurrentOpTest, Run) {
platform::CPUDeviceContext ctx;
rnn_op_->InferShape(scope_);
rnn_op_->Run(scope_, ctx);
}
class RecurrentGradientAlgorithmTest : public ::testing::Test { class RecurrentGradientAlgorithmTest : public ::testing::Test {
protected: protected:
...@@ -281,11 +131,13 @@ class RecurrentGradientAlgorithmTest : public ::testing::Test { ...@@ -281,11 +131,13 @@ class RecurrentGradientAlgorithmTest : public ::testing::Test {
LOG(INFO) << "create variable step_net"; LOG(INFO) << "create variable step_net";
Variable* var = scope_.NewVar("step_net"); Variable* var = scope_.NewVar("step_net");
auto net = var->GetMutable<NetOp>(); auto net = var->GetMutable<NetOp>();
net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", "rnn/s_grad"}, // TODO(qingqing) modify backward op create for RNNOp unit test
{"rnn/h_pre_grad", "rnn/w_grad"}, {})); // and the unit test will be removed to Python.
// net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w",
// "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {}));
net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"}, // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}},
{"rnn/x_grad", "rnn/s_grad"}, {})); // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {}));
net->CompleteAddOp(); net->CompleteAddOp();
} }
...@@ -359,7 +211,8 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -359,7 +211,8 @@ TEST(RecurrentOp, LinkMemories) {
memories.push_back(mem_attr); memories.push_back(mem_attr);
for (size_t i = 1; i < len; ++i) { for (size_t i = 1; i < len; ++i) {
rnn::LinkMemories(step_scopes, memories, i, -1, false /*infer_shape_mode*/); rnn::LinkMemories(step_scopes, memories, i, -1, false
/*infer_shape_mode*/);
} }
// check // check
for (size_t i = 0; i < len - 1; ++i) { for (size_t i = 0; i < len - 1; ++i) {
...@@ -375,7 +228,8 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -375,7 +228,8 @@ TEST(RecurrentOp, LinkMemories) {
} }
for (int i = len - 2; i >= 0; --i) { for (int i = len - 2; i >= 0; --i) {
rnn::LinkMemories(step_scopes, memories, i, 1, false /*infer_shape_mode*/); rnn::LinkMemories(step_scopes, memories, i, 1, false
/*infer_shape_mode*/);
} }
// check // check
for (int i = len - 2; i >= 0; --i) { for (int i = len - 2; i >= 0; --i) {
......
...@@ -18,19 +18,19 @@ namespace paddle { ...@@ -18,19 +18,19 @@ namespace paddle {
namespace operators { namespace operators {
class RowWiseAddOp : public framework::OperatorWithKernel { class RowWiseAddOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(RowWiseAddOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 2UL, auto dim0 = ctx.Input<Tensor>("X")->dims();
"Two inputs is needed by rowwise add"); auto dim1 = ctx.Input<Tensor>("b")->dims();
auto dim0 = ctx.Input<Tensor>(0)->dims();
auto dim1 = ctx.Input<Tensor>(1)->dims();
PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix"); PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix");
PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector"); PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector");
PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same"); PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same");
PADDLE_ENFORCE(ctx.OutputSize() == 1, "The output size must be 1"); PADDLE_ENFORCE(ctx.OutputSize("Out") == 1, "The output size must be 1");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
......
...@@ -31,11 +31,11 @@ template <typename Place, typename T> ...@@ -31,11 +31,11 @@ template <typename Place, typename T>
class RowWiseAddKernel : public framework::OpKernel { class RowWiseAddKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto out = context.Output<Tensor>(0); auto out = context.Output<Tensor>("Out");
out->mutable_data<T>(context.GetPlace()); out->mutable_data<T>(context.GetPlace());
auto input = EigenMatrix<T>::From(*context.Input<Tensor>(0)); auto input = EigenMatrix<T>::From(*context.Input<Tensor>("X"));
auto bias = EigenVector<T>::From(*context.Input<Tensor>(1)); auto bias = EigenVector<T>::From(*context.Input<Tensor>("b"));
auto output = EigenMatrix<T>::From(*out); auto output = EigenMatrix<T>::From(*out);
const int bias_size = bias.dimension(0); const int bias_size = bias.dimension(0);
......
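The kernel now looks up its tensors by the names "X", "b" and "Out" instead of positional indices. The computation itself is a row-wise broadcast add; a framework-free sketch of the same arithmetic on flat row-major buffers:

    #include <cassert>
    #include <iostream>
    #include <vector>

    // out[i][j] = x[i][j] + b[j], with x stored row-major as rows*cols values.
    void RowwiseAdd(const std::vector<float>& x, const std::vector<float>& b,
                    int rows, int cols, std::vector<float>* out) {
      assert(static_cast<int>(x.size()) == rows * cols);
      assert(static_cast<int>(b.size()) == cols);
      out->resize(x.size());
      for (int i = 0; i < rows; ++i)
        for (int j = 0; j < cols; ++j)
          (*out)[i * cols + j] = x[i * cols + j] + b[j];
    }

    int main() {
      std::vector<float> x = {1, 2, 3, 4, 5, 6};  // 2 x 3 matrix
      std::vector<float> b = {10, 20, 30};
      std::vector<float> out;
      RowwiseAdd(x, b, 2, 3, &out);
      for (float v : out) std::cout << v << " ";  // 11 22 33 14 25 36
      std::cout << "\n";
      return 0;
    }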
...@@ -18,17 +18,15 @@ namespace paddle { ...@@ -18,17 +18,15 @@ namespace paddle {
namespace operators { namespace operators {
class SGDOp : public framework::OperatorWithKernel { class SGDOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(SGDOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, "Input size of SGDOp must be two"); PADDLE_ENFORCE(
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of SGDOp must be one"); ctx.Input<Tensor>("param")->dims() == ctx.Input<Tensor>("grad")->dims(),
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "inputs[0] mast be set"); "Two input of SGD Op's dimension must be same.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), "inputs[1] mast be set"); ctx.Output<Tensor>("param_out")->Resize(ctx.Input<Tensor>("param")->dims());
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "outputs[0] mast be set");
PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
"Two input of SGD Op's dimension must be same.");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
} }
}; };
......
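SGDOp's InferShape now only checks that "param" and "grad" have matching dimensions and resizes "param_out" to match. The kernel is not part of this hunk; assuming it applies the usual SGD rule param_out = param − learning_rate·grad, a plain-C++ sketch of that update looks like:

    #include <iostream>
    #include <vector>

    // One plain SGD step: out[i] = param[i] - lr * grad[i].
    std::vector<float> SgdStep(const std::vector<float>& param,
                               const std::vector<float>& grad, float lr) {
      std::vector<float> out(param.size());
      for (size_t i = 0; i < param.size(); ++i) out[i] = param[i] - lr * grad[i];
      return out;
    }

    int main() {
      std::vector<float> param = {1.0f, 2.0f, 3.0f};
      std::vector<float> grad = {0.5f, 0.5f, 0.5f};
      for (float v : SgdStep(param, grad, 0.1f)) std::cout << v << " ";
      std::cout << "\n";  // 0.95 1.95 2.95
      return 0;
    }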
...@@ -18,12 +18,12 @@ namespace paddle { ...@@ -18,12 +18,12 @@ namespace paddle {
namespace operators { namespace operators {
class SigmoidOp : public framework::OperatorWithKernel { class SigmoidOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(SigmoidOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input"); ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
} }
}; };
...@@ -39,7 +39,9 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -39,7 +39,9 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
}; };
class SigmoidOpGrad : public framework::OperatorWithKernel { class SigmoidOpGrad : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(SigmoidOpGrad, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
......
...@@ -28,8 +28,8 @@ template <typename Place, typename T> ...@@ -28,8 +28,8 @@ template <typename Place, typename T>
class SigmoidKernel : public framework::OpKernel { class SigmoidKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input = context.Input<Tensor>(0); auto input = context.Input<Tensor>("X");
auto output = context.Output<Tensor>(0); auto output = context.Output<Tensor>("Y");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
// The clipping is used in Paddle's raw implenmention // The clipping is used in Paddle's raw implenmention
......
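The sigmoid kernel now fetches "X" and "Y" by name; the comment about clipping refers to bounding the pre-activation before exponentiation so exp() cannot overflow. A small sketch of a clipped sigmoid — the threshold below is an illustrative choice, not necessarily the one the kernel uses:

    #include <algorithm>
    #include <cmath>
    #include <iostream>

    // Sigmoid with the input clipped to [-kThreshold, kThreshold] first,
    // so std::exp never sees an extreme argument.
    float ClippedSigmoid(float x) {
      const float kThreshold = 40.0f;  // illustrative bound
      x = std::min(kThreshold, std::max(-kThreshold, x));
      return 1.0f / (1.0f + std::exp(-x));
    }

    int main() {
      for (float x : {-1000.0f, -1.0f, 0.0f, 1.0f, 1000.0f})
        std::cout << ClippedSigmoid(x) << " ";
      std::cout << "\n";  // ~0 0.2689 0.5 0.7311 ~1
      return 0;
    }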
...@@ -18,15 +18,13 @@ namespace paddle { ...@@ -18,15 +18,13 @@ namespace paddle {
namespace operators { namespace operators {
class SoftmaxOp : public framework::OperatorWithKernel { class SoftmaxOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(SoftmaxOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
"Only one input is need for softmax"); "The input of softmax op must be matrix");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims().size(), 2UL,
"The input of softmax op must be matrix");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Only one output is need for softmax");
ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims()); ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
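SoftmaxOp's InferShape now only requires that "X" be a matrix and gives "Y" the same shape. For reference, a framework-free sketch of the row-wise softmax such a kernel computes, written in the numerically stable max-subtracted form:

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    // Softmax over one row: y[j] = exp(x[j] - max(x)) / sum_k exp(x[k] - max(x)).
    std::vector<float> Softmax(const std::vector<float>& x) {
      float max_val = x[0];
      for (float v : x) max_val = std::max(max_val, v);
      std::vector<float> y(x.size());
      float sum = 0.0f;
      for (size_t i = 0; i < x.size(); ++i) {
        y[i] = std::exp(x[i] - max_val);
        sum += y[i];
      }
      for (float& v : y) v /= sum;
      return y;
    }

    int main() {
      for (float v : Softmax({1.0f, 2.0f, 3.0f})) std::cout << v << " ";
      std::cout << "\n";  // ~0.090 0.245 0.665
      return 0;
    }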
...@@ -43,14 +41,12 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -43,14 +41,12 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
}; };
class SoftmaxOpGrad : public framework::OperatorWithKernel { class SoftmaxOpGrad : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(SoftmaxOpGrad, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL, PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
"Input of SoftmaxOpGrad should be 3, X, Y, YG");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Output of SoftmaxOpGrad should be 1");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")), PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
"Input(Y@GRAD) should not be null"); "Input(Y@GRAD) should not be null");
PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() == PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
......
...@@ -27,7 +27,7 @@ template <typename T> ...@@ -27,7 +27,7 @@ template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel { class CPUUniformRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = unsigned int seed =
static_cast<unsigned int>(context.op_.GetAttr<int>("seed")); static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
...@@ -46,12 +46,14 @@ class CPUUniformRandomKernel : public framework::OpKernel { ...@@ -46,12 +46,14 @@ class CPUUniformRandomKernel : public framework::OpKernel {
}; };
class UniformRandomOp : public framework::OperatorWithKernel { class UniformRandomOp : public framework::OperatorWithKernel {
DEFINE_OPERATOR_CTOR(UniformRandomOp, framework::OperatorWithKernel) public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& ctx) const override { void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"), PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
"uniform_random's min must less then max"); "uniform_random's min must less then max");
auto* tensor = ctx.Output<framework::Tensor>(0); auto* tensor = ctx.Output<framework::Tensor>("Out");
auto dims = GetAttr<std::vector<int>>("dims"); auto dims = GetAttr<std::vector<int>>("dims");
tensor->Resize(framework::make_ddim(dims)); tensor->Resize(framework::make_ddim(dims));
} }
......
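UniformRandomOp reads its output by the name "Out" and shapes it from the "dims" attribute; the CPU kernel fills it with values drawn uniformly from [min, max) using the "seed" attribute. A standalone sketch of that fill with the standard <random> facilities, standing in for whatever engine the kernel actually uses:

    #include <iostream>
    #include <random>
    #include <vector>

    // Fill `data` with `n` uniform samples from [min, max), seeded explicitly
    // so the result is reproducible.
    void UniformFill(std::vector<float>* data, size_t n, float min, float max,
                     unsigned int seed) {
      std::mt19937 engine(seed);
      std::uniform_real_distribution<float> dist(min, max);
      data->resize(n);
      for (size_t i = 0; i < n; ++i) (*data)[i] = dist(engine);
    }

    int main() {
      std::vector<float> data;
      UniformFill(&data, 5, -1.0f, 1.0f, /*seed=*/42);
      for (float v : data) std::cout << v << " ";
      std::cout << "\n";
      return 0;
    }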
...@@ -46,7 +46,7 @@ template <typename T> ...@@ -46,7 +46,7 @@ template <typename T>
class GPUUniformRandomKernel : public framework::OpKernel { class GPUUniformRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = unsigned int seed =
static_cast<unsigned int>(context.op_.GetAttr<int>("seed")); static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
......
...@@ -62,12 +62,12 @@ extern void *cublas_dso_handle; ...@@ -62,12 +62,12 @@ extern void *cublas_dso_handle;
DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name)
#define CUBLAS_BLAS_ROUTINE_EACH(__macro) \ #define CUBLAS_BLAS_ROUTINE_EACH(__macro) \
__macro(cublasSgemv); \ __macro(cublasSgemv_v2); \
__macro(cublasDgemv); \ __macro(cublasDgemv_v2); \
__macro(cublasSgemm); \ __macro(cublasSgemm_v2); \
__macro(cublasDgemm); \ __macro(cublasDgemm_v2); \
__macro(cublasSgeam); \ __macro(cublasSgeam_v2); \
__macro(cublasDgeam); \ __macro(cublasDgeam_v2); \
__macro(cublasCreate_v2); \ __macro(cublasCreate_v2); \
__macro(cublasDestroy_v2); \ __macro(cublasDestroy_v2); \
__macro(cublasSetStream_v2); \ __macro(cublasSetStream_v2); \
......
...@@ -14,14 +14,21 @@ limitations under the License. */ ...@@ -14,14 +14,21 @@ limitations under the License. */
#pragma once #pragma once
#include <execinfo.h> #include <dlfcn.h> // for dladdr
#include <execinfo.h> // for backtrace
#include <iomanip> #include <iomanip>
#include <memory>
#include <sstream> #include <sstream>
#include <stdexcept> #include <stdexcept>
#include <string> #include <string>
#include "paddle/string/printf.h" #include "paddle/string/printf.h"
#include "paddle/string/to_string.h" #include "paddle/string/to_string.h"
#ifdef __GNUC__
#include <cxxabi.h> // for __cxa_demangle
#endif
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cublas.h"
...@@ -39,6 +46,19 @@ limitations under the License. */ ...@@ -39,6 +46,19 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace platform { namespace platform {
namespace {
#ifdef __GNUC__
inline std::string demangle(std::string name) {
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
return (status == 0) ? res.get() : name;
}
#else
inline std::string demangle(std::string name) { return name; }
#endif
}
struct EnforceNotMet : public std::exception { struct EnforceNotMet : public std::exception {
std::exception_ptr exp_; std::exception_ptr exp_;
std::string err_str_; std::string err_str_;
...@@ -48,15 +68,29 @@ struct EnforceNotMet : public std::exception { ...@@ -48,15 +68,29 @@ struct EnforceNotMet : public std::exception {
std::rethrow_exception(exp_); std::rethrow_exception(exp_);
} catch (const std::exception& exp) { } catch (const std::exception& exp) {
std::ostringstream sout; std::ostringstream sout;
sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl;
sout << "Call Stacks: " << std::endl; sout << "PaddlePaddle Call Stacks: " << std::endl;
void* call_stack[TRACE_STACK_LIMIT]; void* call_stack[TRACE_STACK_LIMIT];
int sz = backtrace(call_stack, TRACE_STACK_LIMIT); auto size = backtrace(call_stack, TRACE_STACK_LIMIT);
auto line = backtrace_symbols(call_stack, sz); auto symbols = backtrace_symbols(call_stack, size);
for (int i = 0; i < sz; ++i) {
sout << line[i] << std::endl; Dl_info info;
for (int i = 0; i < size; ++i) {
if (dladdr(call_stack[i], &info)) {
auto demangled = demangle(info.dli_sname);
auto addr_offset = static_cast<char*>(call_stack[i]) -
static_cast<char*>(info.dli_saddr);
sout << string::Sprintf("%-3d %*0p %s + %zd\n", i,
2 + sizeof(void*) * 2, call_stack[i],
demangled, addr_offset);
} else {
sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2,
call_stack[i]);
}
} }
free(line); free(symbols);
err_str_ = sout.str(); err_str_ = sout.str();
} }
} }
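The EnforceNotMet change enriches the captured stack: besides backtrace()/backtrace_symbols(), each frame is resolved with dladdr() and the mangled symbol is passed through abi::__cxa_demangle(). A self-contained sketch of that combination — a GNU toolchain is assumed; link with -ldl and compile with -rdynamic so dladdr can see the program's own symbols:

    #include <cxxabi.h>    // abi::__cxa_demangle
    #include <dlfcn.h>     // dladdr
    #include <execinfo.h>  // backtrace, backtrace_symbols

    #include <cstdlib>
    #include <iostream>
    #include <memory>
    #include <string>

    std::string Demangle(const char* name) {
      int status = -4;  // arbitrary initial value, overwritten by __cxa_demangle
      std::unique_ptr<char, void (*)(void*)> res{
          abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
      return (status == 0 && res) ? res.get() : name;
    }

    void PrintStack() {
      void* frames[32];
      int size = backtrace(frames, 32);
      char** symbols = backtrace_symbols(frames, size);
      for (int i = 0; i < size; ++i) {
        Dl_info info;
        if (dladdr(frames[i], &info) && info.dli_sname != nullptr) {
          std::cout << i << ": " << Demangle(info.dli_sname) << "\n";
        } else {
          // Fall back to the raw backtrace_symbols() string.
          std::cout << i << ": " << (symbols ? symbols[i] : "?") << "\n";
        }
      }
      std::free(symbols);
    }

    int main() {
      PrintStack();
      return 0;
    }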
...@@ -170,7 +204,7 @@ inline void throw_on_error(T e) { ...@@ -170,7 +204,7 @@ inline void throw_on_error(T e) {
* PADDLE_ENFORCE_EQ(a, b); * PADDLE_ENFORCE_EQ(a, b);
* *
* will raise an expression described as follows: * will raise an expression described as follows:
* "enforce a == b failed, 1 != 2" with detailed stack infomation. * "enforce a == b failed, 1 != 2" with detailed stack information.
* *
* extra messages is also supported, for example: * extra messages is also supported, for example:
* PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2) * PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
......
...@@ -298,8 +298,8 @@ def pnpair_evaluator( ...@@ -298,8 +298,8 @@ def pnpair_evaluator(
input, input,
label, label,
info, info,
name=None, weight=None,
weight=None, ): name=None, ):
""" """
Positive-negative pair rate Evaluator which adapts to rank task like Positive-negative pair rate Evaluator which adapts to rank task like
learning to rank. This evaluator must contain at least three layers. learning to rank. This evaluator must contain at least three layers.
...@@ -308,27 +308,31 @@ def pnpair_evaluator( ...@@ -308,27 +308,31 @@ def pnpair_evaluator(
.. code-block:: python .. code-block:: python
eval = pnpair_evaluator(input, info, label) eval = pnpair_evaluator(input, label, info)
:param name: Evaluator name.
:type name: None|basestring
:param input: Input Layer name. The output prediction of network. :param input: Input Layer name. The output prediction of network.
:type input: LayerOutput :type input: LayerOutput
:param label: Label layer name. :param label: Label layer name.
:type label: LayerOutput :type label: LayerOutput
:param info: Label layer name. (TODO, explaination) :param info: Info layer name. (TODO, explaination)
:type info: LayerOutput :type info: LayerOutput
:param weight: Weight Layer name. It should be a matrix with size :param weight: Weight Layer name. It should be a matrix with size
[sample_num, 1]. (TODO, explaination) [sample_num, 1]. (TODO, explaination)
:type weight: LayerOutput :type weight: LayerOutput
:param name: Evaluator name.
:type name: None|basestring
""" """
if not isinstance(input, list):
input = [input]
if label:
input.append(label)
if info:
input.append(info)
evaluator_base( evaluator_base(
name=name,
type="pnpair",
input=input, input=input,
label=label, type="pnpair",
info=info, weight=weight,
weight=weight) name=name, )
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
...@@ -429,12 +433,12 @@ def chunk_evaluator( ...@@ -429,12 +433,12 @@ def chunk_evaluator(
.. code-block:: text .. code-block:: text
Scheme Description Scheme Description
plain Use the same label for the whole chunk. plain Use the same label for the whole chunk.
IOB Two labels for chunk type X, B-X for chunk begining and I-X for chunk inside. IOB Two labels for chunk type X, B-X for chunk begining and I-X for chunk inside.
IOE Two labels for chunk type X, E-X for chunk ending and I-X for chunk inside. IOE Two labels for chunk type X, E-X for chunk ending and I-X for chunk inside.
IOBES Four labels for chunk type X, B-X for chunk begining, I-X for chunk inside, E-X for chunk end and S-X for single word chunk. IOBES Four labels for chunk type X, B-X for chunk begining, I-X for chunk inside, E-X for chunk end and S-X for single word chunk.
To make it clear, let's illustrate by an NER example. To make it clear, let's illustrate by an NER example.
Assuming that there are three named entity types including ORG, PER and LOC which are called 'chunk type' here, Assuming that there are three named entity types including ORG, PER and LOC which are called 'chunk type' here,
if 'IOB' scheme were used, the label set will be extended to a set including B-ORG, I-ORG, B-PER, I-PER, B-LOC, I-LOC and O, if 'IOB' scheme were used, the label set will be extended to a set including B-ORG, I-ORG, B-PER, I-PER, B-LOC, I-LOC and O,
...@@ -451,7 +455,7 @@ def chunk_evaluator( ...@@ -451,7 +455,7 @@ def chunk_evaluator(
tagType = label % numTagType tagType = label % numTagType
chunkType = label / numTagType chunkType = label / numTagType
otherChunkType = numChunkTypes otherChunkType = numChunkTypes
The following table shows the mapping rule between tagType and tag type in each scheme. The following table shows the mapping rule between tagType and tag type in each scheme.
.. code-block:: text .. code-block:: text
...@@ -475,7 +479,7 @@ def chunk_evaluator( ...@@ -475,7 +479,7 @@ def chunk_evaluator(
O 6 O 6
In this example, chunkType has three values: 0 for ORG, 1 for PER, 2 for LOC, because the scheme is In this example, chunkType has three values: 0 for ORG, 1 for PER, 2 for LOC, because the scheme is
"IOB" so tagType has two values: 0 for B and 1 for I. "IOB" so tagType has two values: 0 for B and 1 for I.
Here we will use I-LOC to explain the above mapping rules in detail. Here we will use I-LOC to explain the above mapping rules in detail.
For I-LOC, the label id is 5, so we can get tagType=1 and chunkType=2, which means I-LOC is a part of NER chunk LOC For I-LOC, the label id is 5, so we can get tagType=1 and chunkType=2, which means I-LOC is a part of NER chunk LOC
and the tag is I. and the tag is I.
...@@ -486,7 +490,7 @@ def chunk_evaluator( ...@@ -486,7 +490,7 @@ def chunk_evaluator(
eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types) eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types)
:param input: The input layers. :param input: The input layers.
:type input: LayerOutput :type input: LayerOutput
:param label: An input layer containing the ground truth label. :param label: An input layer containing the ground truth label.
......
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.framework_pb2 as framework_pb2
import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2
import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2
def get_all_op_protos(): def get_all_op_protos():
...@@ -12,11 +10,15 @@ def get_all_op_protos(): ...@@ -12,11 +10,15 @@ def get_all_op_protos():
protostrs = core.get_all_op_protos() protostrs = core.get_all_op_protos()
ret_values = [] ret_values = []
for pbstr in protostrs: for pbstr in protostrs:
op_proto = op_proto_pb2.OpProto.FromString(str(pbstr)) op_proto = framework_pb2.OpProto.FromString(str(pbstr))
ret_values.append(op_proto) ret_values.append(op_proto)
return ret_values return ret_values
def is_str(s):
return isinstance(s, str) or isinstance(s, unicode)
class OpDescCreationMethod(object): class OpDescCreationMethod(object):
""" """
A Functor object to convert user input(use key word args) to OpDesc based on A Functor object to convert user input(use key word args) to OpDesc based on
...@@ -27,7 +29,7 @@ class OpDescCreationMethod(object): ...@@ -27,7 +29,7 @@ class OpDescCreationMethod(object):
""" """
def __init__(self, op_proto): def __init__(self, op_proto):
if not isinstance(op_proto, op_proto_pb2.OpProto): if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("Argument should be OpProto") raise TypeError("Argument should be OpProto")
self.__op_proto__ = op_proto self.__op_proto__ = op_proto
...@@ -39,26 +41,34 @@ class OpDescCreationMethod(object): ...@@ -39,26 +41,34 @@ class OpDescCreationMethod(object):
""" """
if len(args) != 0: if len(args) != 0:
raise ValueError("Only keyword arguments is supported by Paddle") raise ValueError("Only keyword arguments is supported by Paddle")
op_desc = op_desc_pb2.OpDesc() op_desc = framework_pb2.OpDesc()
# Inputs for input_parameter in self.__op_proto__.inputs:
ipts, ipt_format, _ = OpDescCreationMethod.extract_input_or_output( input_arguments = kwargs.get(input_parameter.name, [])
"input", kwargs, self.__op_proto__.inputs) if is_str(input_arguments):
op_desc.inputs.extend(ipts) input_arguments = [input_arguments]
if ipt_format is not None:
op_desc.attrs.extend([ipt_format]) if not input_parameter.duplicable and len(input_arguments) > 1:
raise ValueError("Input %s only accepts one input, but give %d"
# Outputs % (input_parameter.name, len(input_arguments)))
outs, out_format, tmp_index = OpDescCreationMethod.extract_input_or_output(
"output", kwargs, self.__op_proto__.outputs) ipt = op_desc.inputs.add()
op_desc.outputs.extend(outs) ipt.parameter = input_parameter.name
if out_format is not None: ipt.arguments.extend(input_arguments)
op_desc.attrs.extend([out_format])
if len(tmp_index) != 0: for output_parameter in self.__op_proto__.outputs:
tmp_index_attr = op_desc.attrs.add() output_arguments = kwargs.get(output_parameter.name, [])
tmp_index_attr.type = attribute_pb2.INTS if is_str(output_arguments):
tmp_index_attr.name = "temporary_index" output_arguments = [output_arguments]
tmp_index_attr.ints.extend(tmp_index)
if not output_parameter.duplicable and len(output_arguments) > 1:
raise ValueError(
"Output %s only accepts one output, but give %d" %
(output_parameter.name, len(output_arguments)))
out = op_desc.outputs.add()
out.parameter = output_parameter.name
out.arguments.extend(output_arguments)
# Types # Types
op_desc.type = self.__op_proto__.type op_desc.type = self.__op_proto__.type
...@@ -72,17 +82,17 @@ class OpDescCreationMethod(object): ...@@ -72,17 +82,17 @@ class OpDescCreationMethod(object):
new_attr = op_desc.attrs.add() new_attr = op_desc.attrs.add()
new_attr.name = attr.name new_attr.name = attr.name
new_attr.type = attr.type new_attr.type = attr.type
if attr.type == attribute_pb2.INT: if attr.type == framework_pb2.INT:
new_attr.i = user_defined_attr new_attr.i = user_defined_attr
elif attr.type == attribute_pb2.FLOAT: elif attr.type == framework_pb2.FLOAT:
new_attr.f = user_defined_attr new_attr.f = user_defined_attr
elif attr.type == attribute_pb2.STRING: elif attr.type == framework_pb2.STRING:
new_attr.s = user_defined_attr new_attr.s = user_defined_attr
elif attr.type == attribute_pb2.INTS: elif attr.type == framework_pb2.INTS:
new_attr.ints.extend(user_defined_attr) new_attr.ints.extend(user_defined_attr)
elif attr.type == attribute_pb2.FLOATS: elif attr.type == framework_pb2.FLOATS:
new_attr.floats.extend(user_defined_attr) new_attr.floats.extend(user_defined_attr)
elif attr.type == attribute_pb2.STRINGS: elif attr.type == framework_pb2.STRINGS:
new_attr.strings.extend(user_defined_attr) new_attr.strings.extend(user_defined_attr)
else: else:
raise NotImplementedError("Not support attribute type " + raise NotImplementedError("Not support attribute type " +
...@@ -90,50 +100,6 @@ class OpDescCreationMethod(object): ...@@ -90,50 +100,6 @@ class OpDescCreationMethod(object):
return op_desc return op_desc
@staticmethod
def extract_input_or_output(in_out, kwargs, meta):
"""
Extract input variable names or output variable names from key-word
arguments, which base on VarProtos.
:param in_out: "input" or "output"
:param kwargs: key-word arguments that user inputted.
:param meta: a list of VarProto
:return: The three object will be return. The variable names. The
input_format or output_format attribute(None if the input or output is
not multiple). The temporary variable index list.
"""
multiple = OpDescCreationMethod.any_is_true((m.multiple for m in meta))
tmp_index = []
retv = []
if multiple:
var_format = op_desc_pb2.AttrDesc()
var_format.type = attribute_pb2.INTS
var_format.name = "%s_format" % in_out
var_format.ints.append(0)
for var in meta:
var_name = var.name
if var.temporary:
var_name = [core.var_names.temp()]
tmp_index.append(len(retv))
else:
var_name = kwargs.get(var_name, [])
if not isinstance(var_name, list):
var_name = [var_name]
retv.extend(var_name)
var_format.ints.append(len(var_name) + var_format.ints[-1])
return retv, var_format, tmp_index
else:
for var in meta:
if var.temporary:
retv.append(kwargs.get(var.name, core.var_names.temp()))
tmp_index.append(len(retv))
else:
retv.append(kwargs.get(var.name, core.var_names.empty()))
return retv, None, tmp_index
@staticmethod @staticmethod
def any_is_true(generator): def any_is_true(generator):
""" """
...@@ -146,13 +112,12 @@ class OpDescCreationMethod(object): ...@@ -146,13 +112,12 @@ class OpDescCreationMethod(object):
class OpInfo(object): class OpInfo(object):
def __init__(self, name, method, inputs, outputs, attrs, no_temp_outputs): def __init__(self, name, method, inputs, outputs, attrs):
self.name = name self.name = name
self.method = method self.method = method
self.inputs = inputs self.inputs = inputs
self.outputs = outputs self.outputs = outputs
self.attrs = attrs self.attrs = attrs
self.no_temp_outputs = no_temp_outputs
def create_op_creation_method(op_proto): def create_op_creation_method(op_proto):
...@@ -170,10 +135,7 @@ def create_op_creation_method(op_proto): ...@@ -170,10 +135,7 @@ def create_op_creation_method(op_proto):
name=op_proto.type, name=op_proto.type,
inputs=[var.name for var in op_proto.inputs], inputs=[var.name for var in op_proto.inputs],
outputs=[var.name for var in op_proto.outputs], outputs=[var.name for var in op_proto.outputs],
attrs=[attr.name for attr in op_proto.attrs], attrs=[attr.name for attr in op_proto.attrs])
no_temp_outputs=[
var.name for var in op_proto.outputs if not var.temporary
])
class OperatorFactory(object): class OperatorFactory(object):
...@@ -214,8 +176,5 @@ class OperatorFactory(object): ...@@ -214,8 +176,5 @@ class OperatorFactory(object):
def get_op_attr_names(self, type): def get_op_attr_names(self, type):
return self.get_op_info(type).attrs return self.get_op_info(type).attrs
def get_op_no_temp_output_names(self, type):
return self.get_op_info(type).no_temp_outputs
Operator = OperatorFactory() # Default global factory Operator = OperatorFactory() # Default global factory
...@@ -24,3 +24,4 @@ py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) ...@@ -24,3 +24,4 @@ py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
py_test(test_operator SRCS test_operator.py) py_test(test_operator SRCS test_operator.py)
# py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) # py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
py_test(test_uniform_random_op SRCS test_uniform_random_op.py) py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
py_test(test_recurrent_op SRCS test_recurrent_op.py)
...@@ -53,15 +53,18 @@ def get_numeric_gradient(op, ...@@ -53,15 +53,18 @@ def get_numeric_gradient(op,
tensor.set(input_values[var_name], core.CPUPlace()) tensor.set(input_values[var_name], core.CPUPlace())
# Create all output variable in local_scope # Create all output variable in local_scope
for output in op.outputs(): opts = op.outputs()
if local_scope.find_var(output) is None: for key in opts:
local_scope.new_var(output).get_tensor() for output in opts[key]:
if local_scope.find_var(output) is None:
local_scope.new_var(output).get_tensor()
op.infer_shape(local_scope) op.infer_shape(local_scope)
# allocate output memory # allocate output memory
for output in op.outputs(): for key in opts:
local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace()) for output in opts[key]:
local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace(
))
# TODO(yuyang18): Only CPU is support now. # TODO(yuyang18): Only CPU is support now.
cpu_ctx = core.DeviceContext.create(core.CPUPlace()) cpu_ctx = core.DeviceContext.create(core.CPUPlace())
...@@ -150,19 +153,24 @@ class GradientChecker(unittest.TestCase): ...@@ -150,19 +153,24 @@ class GradientChecker(unittest.TestCase):
if no_grad_set is None: if no_grad_set is None:
no_grad_set = set() no_grad_set = set()
tmp_outs = forward_op.temp_outputs() no_tmp_out = forward_op.no_intermediate_outputs()
no_tmp_out = filter(lambda name: name not in tmp_outs,
forward_op.outputs())
if len(no_tmp_out) != 1: if len(no_tmp_out) != 1:
raise ValueError("non temp out_names should be 1") raise ValueError("non temp out_names should be 1")
in_names = forward_op.inputs() inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
outputs = forward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
for no_grad in no_grad_set: for no_grad in no_grad_set:
if no_grad not in in_names: if no_grad not in in_names:
raise ValueError("no_grad should be in in_names") raise ValueError("no_grad should be in in_names")
backward_op = core.Operator.backward(forward_op, no_grad_set) backward_op = core.Operator.backward(forward_op, no_grad_set)
bwd_outputs = backward_op.outputs()
bwd_out_names = [item for k in bwd_outputs for item in bwd_outputs[k]]
places = [core.CPUPlace()] places = [core.CPUPlace()]
if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu(): if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
places.append(core.GPUPlace(0)) places.append(core.GPUPlace(0))
...@@ -188,7 +196,7 @@ class GradientChecker(unittest.TestCase): ...@@ -188,7 +196,7 @@ class GradientChecker(unittest.TestCase):
var.set(value, place) var.set(value, place)
# create output var # create output var
for out_name in forward_op.outputs(): for out_name in out_names:
scope.new_var(out_name).get_tensor() scope.new_var(out_name).get_tensor()
# infer the shape of output var and compute/set value of output var # infer the shape of output var and compute/set value of output var
...@@ -198,7 +206,7 @@ class GradientChecker(unittest.TestCase): ...@@ -198,7 +206,7 @@ class GradientChecker(unittest.TestCase):
# create output grad var # create output grad var
# set shape as the output var # set shape as the output var
# set value of this grad to ones # set value of this grad to ones
for name in forward_op.outputs(): for name in out_names:
out_tensor = scope.find_var(name).get_tensor() out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
grad_tensor.set_dims(out_tensor.shape()) grad_tensor.set_dims(out_tensor.shape())
...@@ -206,7 +214,7 @@ class GradientChecker(unittest.TestCase): ...@@ -206,7 +214,7 @@ class GradientChecker(unittest.TestCase):
grad_tensor.set(data, place) grad_tensor.set(data, place)
# create input grad var # create input grad var
for name in backward_op.outputs(): for name in bwd_out_names:
scope.new_var(name).get_tensor() scope.new_var(name).get_tensor()
# infer the shape of input gradient var and compute/set it's value # infer the shape of input gradient var and compute/set it's value
......
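get_numeric_gradient and GradientChecker now treat inputs() and outputs() as maps from parameter name to a list of variable names, so each loop over variables becomes a nested loop over the map. The quantity being checked is still a finite-difference estimate of the gradient; a minimal C++ sketch of the central-difference scheme such a checker can use — the exact scheme and tolerances in Paddle's checker are not shown in this hunk:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Estimate d f / d x[i] for every i with central differences:
    //   (f(x + h*e_i) - f(x - h*e_i)) / (2h)
    std::vector<double> NumericGradient(
        const std::function<double(const std::vector<double>&)>& f,
        std::vector<double> x, double h = 1e-5) {
      std::vector<double> grad(x.size());
      for (size_t i = 0; i < x.size(); ++i) {
        double orig = x[i];
        x[i] = orig + h;
        double fp = f(x);
        x[i] = orig - h;
        double fm = f(x);
        x[i] = orig;
        grad[i] = (fp - fm) / (2.0 * h);
      }
      return grad;
    }

    int main() {
      // f(x) = sum of squares; the analytic gradient is 2*x.
      auto f = [](const std::vector<double>& x) {
        double s = 0.0;
        for (double v : x) s += v * v;
        return s;
      };
      for (double g : NumericGradient(f, {1.0, 2.0, 3.0}))
        std::cout << g << " ";  // ~2 4 6
      std::cout << "\n";
      return 0;
    }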
...@@ -19,14 +19,5 @@ class TestAddOp(unittest.TestCase): ...@@ -19,14 +19,5 @@ class TestAddOp(unittest.TestCase):
self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
class TestAddGradOp(unittest.TestCase):
def test_add_grad(self):
op = Operator('add_two', X="X", Y="Y", Out="Out")
backward_op = core.Operator.backward(op, set())
self.assertEqual(backward_op.type(), "add_two_grad")
expected = '''Op(add_two_grad), inputs:(X, Y, Out, Out@GRAD), outputs:(X@GRAD, Y@GRAD).'''
self.assertEqual(expected, str(backward_op))
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -25,12 +25,12 @@ class TestNet(unittest.TestCase): ...@@ -25,12 +25,12 @@ class TestNet(unittest.TestCase):
net.complete_add_op(True) net.complete_add_op(True)
expected = ''' expected = '''
Op(plain_net), inputs:(W, X, Y), outputs:(Out, fc.out, pre_activation). Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
Op(add_two), inputs:(X, Y), outputs:(Out). Op(add_two), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
Op(plain_net), inputs:(W, X), outputs:(fc.out, pre_activation). Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
Op(plain_net), inputs:(W, X), outputs:(fc.out, pre_activation). Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
Op(mul), inputs:(X, W), outputs:(pre_activation). Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
Op(sigmoid), inputs:(pre_activation), outputs:(fc.out). Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}.
''' '''
self.assertEqual(expected, "\n" + str(net)) self.assertEqual(expected, "\n" + str(net))
......
import unittest import unittest
import paddle.v2.framework.op as op import paddle.v2.framework.op as op
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.framework_pb2 as framework_pb2
import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2
import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2
class TestGetAllProtos(unittest.TestCase): class TestGetAllProtos(unittest.TestCase):
...@@ -17,7 +15,7 @@ class TestGetAllProtos(unittest.TestCase): ...@@ -17,7 +15,7 @@ class TestGetAllProtos(unittest.TestCase):
class TestOpDescCreationMethod(unittest.TestCase): class TestOpDescCreationMethod(unittest.TestCase):
def test_plain_input_output(self): def test_plain_input_output(self):
op_proto = op_proto_pb2.OpProto() op_proto = framework_pb2.OpProto()
op_proto.type = "test" op_proto.type = "test"
ipt = op_proto.inputs.add() ipt = op_proto.inputs.add()
ipt.name = "X" ipt.name = "X"
...@@ -37,25 +35,32 @@ class TestOpDescCreationMethod(unittest.TestCase): ...@@ -37,25 +35,32 @@ class TestOpDescCreationMethod(unittest.TestCase):
method = op.OpDescCreationMethod(op_proto) method = op.OpDescCreationMethod(op_proto)
output = method(X="a", Y="b", Z="c") output = method(X="a", Y="b", Z="c")
expected = framework_pb2.OpDesc()
expected = op_desc_pb2.OpDesc()
expected.type = "test" expected.type = "test"
expected.inputs.extend(["a", "b"]) ipt_0 = expected.inputs.add()
expected.outputs.append("c") ipt_0.parameter = "X"
ipt_0.arguments.extend(["a"])
ipt_1 = expected.inputs.add()
ipt_1.parameter = 'Y'
ipt_1.arguments.extend(['b'])
opt = expected.outputs.add()
opt.parameter = "Z"
opt.arguments.extend(["c"])
self.assertEqual(expected, output) self.assertEqual(expected, output)
def test_multiple_input_plain_output(self): def test_multiple_input_plain_output(self):
op_proto = op_proto_pb2.OpProto() op_proto = framework_pb2.OpProto()
op_proto.type = "fc" op_proto.type = "fc"
ipt = op_proto.inputs.add() ipt = op_proto.inputs.add()
ipt.name = "X" ipt.name = "X"
ipt.comment = "" ipt.comment = ""
ipt.multiple = True ipt.duplicable = True
ipt = op_proto.inputs.add() ipt = op_proto.inputs.add()
ipt.name = "W" ipt.name = "W"
ipt.comment = "" ipt.comment = ""
ipt.multiple = True ipt.duplicable = True
ipt = op_proto.inputs.add() ipt = op_proto.inputs.add()
ipt.name = "b" ipt.name = "b"
...@@ -70,30 +75,50 @@ class TestOpDescCreationMethod(unittest.TestCase): ...@@ -70,30 +75,50 @@ class TestOpDescCreationMethod(unittest.TestCase):
method = op.OpDescCreationMethod(op_proto) method = op.OpDescCreationMethod(op_proto)
generated1 = method(X="x", W="w", b="b", Y="y") generated1 = method(X="x", W="w", b="b", Y="y")
expected1 = op_desc_pb2.OpDesc() expected1 = framework_pb2.OpDesc()
expected1.inputs.extend(['x', 'w', 'b']) tmp = expected1.inputs.add()
expected1.outputs.extend(['y']) tmp.parameter = "X"
tmp.arguments.extend(['x'])
tmp = expected1.inputs.add()
tmp.parameter = 'W'
tmp.arguments.extend(['w'])
tmp = expected1.inputs.add()
tmp.parameter = 'b'
tmp.arguments.extend(['b'])
tmp = expected1.outputs.add()
tmp.parameter = 'Y'
tmp.arguments.extend(['y'])
expected1.type = 'fc' expected1.type = 'fc'
attr = expected1.attrs.add()
attr.name = 'input_format'
attr.type = attribute_pb2.INTS
attr.ints.extend([0, 1, 2, 3])
self.assertEqual(expected1, generated1) self.assertEqual(expected1, generated1)
generated2 = method( generated2 = method(
X=['x1', 'x2', 'x3'], b='b', W=['w1', 'w2', 'w3'], Y='y') X=['x1', 'x2', 'x3'], b='b', W=['w1', 'w2', 'w3'], Y='y')
expected2 = op_desc_pb2.OpDesc() expected2 = framework_pb2.OpDesc()
expected2.inputs.extend(['x1', 'x2', 'x3', 'w1', 'w2', 'w3', 'b'])
expected2.outputs.extend(['y']) tmp = expected2.inputs.add()
tmp.parameter = "X"
tmp.arguments.extend(['x1', 'x2', 'x3'])
tmp = expected2.inputs.add()
tmp.parameter = 'W'
tmp.arguments.extend(['w1', 'w2', 'w3'])
tmp = expected2.inputs.add()
tmp.parameter = 'b'
tmp.arguments.extend(['b'])
tmp = expected2.outputs.add()
tmp.parameter = 'Y'
tmp.arguments.extend(['y'])
expected2.type = 'fc' expected2.type = 'fc'
attr = expected2.attrs.add()
attr.name = 'input_format'
attr.type = attribute_pb2.INTS
attr.ints.extend([0, 3, 6, 7])
self.assertEqual(expected2, generated2) self.assertEqual(expected2, generated2)
def test_attrs(self): def test_attrs(self):
op_proto = op_proto_pb2.OpProto() op_proto = framework_pb2.OpProto()
op_proto.type = "test" op_proto.type = "test"
ipt = op_proto.inputs.add() ipt = op_proto.inputs.add()
ipt.name = 'X' ipt.name = 'X'
...@@ -105,12 +130,12 @@ class TestOpDescCreationMethod(unittest.TestCase): ...@@ -105,12 +130,12 @@ class TestOpDescCreationMethod(unittest.TestCase):
attr.comment = "" attr.comment = ""
attr.type = type attr.type = type
__add_attr__("int_attr", attribute_pb2.INT) __add_attr__("int_attr", framework_pb2.INT)
__add_attr__("float_attr", attribute_pb2.FLOAT) __add_attr__("float_attr", framework_pb2.FLOAT)
__add_attr__("string_attr", attribute_pb2.STRING) __add_attr__("string_attr", framework_pb2.STRING)
__add_attr__("ints_attr", attribute_pb2.INTS) __add_attr__("ints_attr", framework_pb2.INTS)
__add_attr__("floats_attr", attribute_pb2.FLOATS) __add_attr__("floats_attr", framework_pb2.FLOATS)
__add_attr__("strings_attr", attribute_pb2.STRINGS) __add_attr__("strings_attr", framework_pb2.STRINGS)
op_proto.comment = "" op_proto.comment = ""
self.assertTrue(op_proto.IsInitialized()) self.assertTrue(op_proto.IsInitialized())
...@@ -126,76 +151,52 @@ class TestOpDescCreationMethod(unittest.TestCase): ...@@ -126,76 +151,52 @@ class TestOpDescCreationMethod(unittest.TestCase):
floats_attr=[0.2, 3.2, 4.5], floats_attr=[0.2, 3.2, 4.5],
strings_attr=["a", "b", "c"]) strings_attr=["a", "b", "c"])
expected = op_desc_pb2.OpDesc() expected = framework_pb2.OpDesc()
expected.type = "test" expected.type = "test"
expected.inputs.extend(['a'])
ipt = expected.inputs.add()
ipt.parameter = "X"
ipt.arguments.extend(['a'])
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = "int_attr" attr.name = "int_attr"
attr.type = attribute_pb2.INT attr.type = framework_pb2.INT
attr.i = 10 attr.i = 10
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = "float_attr" attr.name = "float_attr"
attr.type = attribute_pb2.FLOAT attr.type = framework_pb2.FLOAT
attr.f = 3.2 attr.f = 3.2
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = "string_attr" attr.name = "string_attr"
attr.type = attribute_pb2.STRING attr.type = framework_pb2.STRING
attr.s = "test_str" attr.s = "test_str"
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = "ints_attr" attr.name = "ints_attr"
attr.type = attribute_pb2.INTS attr.type = framework_pb2.INTS
attr.ints.extend([0, 1, 2, 3, 4]) attr.ints.extend([0, 1, 2, 3, 4])
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = 'floats_attr' attr.name = 'floats_attr'
attr.type = attribute_pb2.FLOATS attr.type = framework_pb2.FLOATS
attr.floats.extend([0.2, 3.2, 4.5]) attr.floats.extend([0.2, 3.2, 4.5])
attr = expected.attrs.add() attr = expected.attrs.add()
attr.name = 'strings_attr' attr.name = 'strings_attr'
attr.type = attribute_pb2.STRINGS attr.type = framework_pb2.STRINGS
attr.strings.extend(['a', 'b', 'c']) attr.strings.extend(['a', 'b', 'c'])
self.assertEqual(expected, generated) self.assertEqual(expected, generated)
def test_input_temporary_output(self):
op_proto = op_proto_pb2.OpProto()
op_proto.type = "test"
out = op_proto.outputs.add()
out.name = "OUT"
out.comment = ""
out = op_proto.outputs.add()
out.name = "TMP"
out.comment = ""
out.temporary = True
out = op_proto.outputs.add()
out.name = "OUT2"
out.comment = ""
op_proto.comment = ""
method = op.OpDescCreationMethod(op_proto)
generated = method(OUT="a", OUT2="b")
desc = op_desc_pb2.OpDesc()
desc.outputs.extend(["a", core.var_names.temp(), "b"])
desc.type = "test"
attr = desc.attrs.add()
attr.name = "temporary_index"
attr.type = attribute_pb2.INTS
attr.ints.append(2)
self.assertEqual(generated, desc)
class TestOpCreations(unittest.TestCase): class TestOpCreations(unittest.TestCase):
def test_all(self): def test_all(self):
add_op = op.Operator("add_two", X="a", Y="b", Out="z") add_op = op.Operator("add_two", X="a", Y="b", Out="z")
self.assertIsNotNone(add_op) self.assertIsNotNone(add_op)
# Invoke C++ DebugString() # Invoke C++ DebugString()
self.assertEqual('Op(add_two), inputs:(a, b), outputs:(z).', self.assertEqual('Op(add_two), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
str(add_op)) str(add_op))
......
import paddle.v2.framework.proto.op_proto_pb2 as op_proto_lib import paddle.v2.framework.proto.framework_pb2 as framework_pb2
import paddle.v2.framework.proto.attribute_pb2 as attr_type_lib
import unittest import unittest
class TestFrameworkProto(unittest.TestCase): class TestFrameworkProto(unittest.TestCase):
def test_all(self): def test_all(self):
op_proto = op_proto_lib.OpProto() op_proto = framework_pb2.OpProto()
ipt0 = op_proto.inputs.add() ipt0 = op_proto.inputs.add()
ipt0.name = "a" ipt0.name = "a"
ipt0.comment = "the input of cosine op" ipt0.comment = "the input of cosine op"
...@@ -19,7 +18,7 @@ class TestFrameworkProto(unittest.TestCase): ...@@ -19,7 +18,7 @@ class TestFrameworkProto(unittest.TestCase):
attr = op_proto.attrs.add() attr = op_proto.attrs.add()
attr.name = "scale" attr.name = "scale"
attr.comment = "scale of cosine op" attr.comment = "scale of cosine op"
attr.type = attr_type_lib.FLOAT attr.type = framework_pb2.FLOAT
op_proto.type = "cos" op_proto.type = "cos"
self.assertTrue(op_proto.IsInitialized()) self.assertTrue(op_proto.IsInitialized())
......