Commit c2c2d610 authored by chengduoZH

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into Add_pool_op

@@ -25,7 +25,7 @@ function(target_circle_link_libraries TARGET_NAME)
       endif()
     endforeach()
     if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
-      if(IOS AND NOT IOS_ENABLE_BITCODE)
+      if(NOT IOS_ENABLE_BITCODE)
         list(APPEND LIBS "-undefined dynamic_lookup")
       endif()
     endif()
...
@@ -158,17 +158,23 @@ PaddlePaddle uses the name :code:`name` as a parameter's ID; parameters with the same name
 Here :code:`hidden_a` and :code:`hidden_b` use the same parameters and bias, and the two inputs of the softmax layer also use the same parameter :code:`softmax_param`.
-7. \*-cp27mu-linux_x86_64.whl is not a supported wheel on this platform.
+7. paddlepaddle\*.whl is not a supported wheel on this platform.
 ------------------------------------------------------------------------
 
-The main cause of this problem is that, when the system built the wheel package, the :code:`wheel` package used was the latest,
-while the :code:`pip` package on the system is older. The fix is to update the :code:`pip` package and rebuild PaddlePaddle.
+The main cause of this problem is that no paddlepaddle package matching the current system was found. The latest paddlepaddle Python packages support Linux x86_64 and MacOS 10.12 systems with python 2.7 and pip 9.0.1 installed.
 
 The way to update the :code:`pip` package is:
 
 .. code-block:: bash
 
     pip install --upgrade pip
 
+If that still fails, you can run :code:`python -c "import pip; print(pip.pep425tags.get_supported())"` to get the package suffixes supported by the current system,
+and check whether they match the suffix of the package being installed.
+If the system supports :code:`linux_x86_64` but the package is :code:`manylinux1_x86_64`, upgrade pip to the latest version;
+if the system supports :code:`manylinux1_x86_64` but the (local) package is :code:`linux_x86_64`, you can rename the whl package to :code:`manylinux1_x86_64` and then install it.
+
 8. None of the python-related unit tests pass
 ---------------------------------------------
@@ -310,7 +316,7 @@ The Paddle binary catches floating-point exceptions at runtime; whenever a floating-point exception
 * The model never converges, and diverges to extremely large values.
 * The training data is problematic, causing the parameters to converge to singular values; or the input data has an overly large scale, with some features reaching values in the millions, so matrix multiplication can overflow the floating-point range.
 
 The main remedies are to reduce the learning rate or to normalize the data.
 
 15. After building and installing, running import paddle.v2 as paddle reports ImportError: No module named v2
 --------------------------------------------------------------------------------------------------------------
@@ -373,3 +379,15 @@ The model parameter file saved by PaddlePaddle consists of a 16-byte header and the network parameters
 
     parameters = paddle.parameters.create(my_cost)
     parameters.set('emb', load_parameter(emb_param_file, 30000, 256))
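For reference, the :code:`load_parameter` helper called above is not shown in this hunk; a minimal C++ sketch of the same logic, assuming the file body stores row-major float32 values after the 16-byte header (the function name and shapes only mirror the call above and are illustrative):

.. code-block:: c++

    #include <fstream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for the load_parameter helper used above:
    // skip the 16-byte header, then read h * w float32 values (row-major).
    std::vector<float> load_parameter(const std::string& file_name, int h, int w) {
      std::ifstream fin(file_name, std::ios::binary);
      fin.seekg(16, std::ios::beg);  // skip the 16-byte header
      std::vector<float> data(static_cast<size_t>(h) * w);
      fin.read(reinterpret_cast<char*>(data.data()),
               data.size() * sizeof(float));
      return data;
    }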
+18. In multi-node cluster training, the logs contain only network-communication errors
+---------------------------------------------------------------------------------------
+
+In multi-node cluster training, the logs report network-communication errors such as :code:`Connection reset by peer`.
+Such errors usually occur because an error on one node makes its training process exit, which in turn leaves the other nodes unable to connect. You can troubleshoot with the following steps:
+
+* Find the earliest error in :code:`train.log` and :code:`server.log`, and check whether it was triggered by some other failure (e.g. FPE, out of memory, or out of disk space).
+* If the earliest error is indeed a network-communication one, it is most likely a port conflict caused by non-exclusive execution. Contact the operations team (OP) to check whether the current MPI cluster supports submitting with the resource=full parameter; if it does, submit with this parameter added and change the job port.
+* If the current MPI cluster does not support exclusive mode, ask the operations team whether the cluster can be switched or upgraded.
\ No newline at end of file
@@ -19,6 +19,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
+static ProgramDesc* g_program_desc = nullptr;
+
+ProgramDesc& GetProgramDesc() {
+  if (g_program_desc == nullptr) {
+    g_program_desc = new ProgramDesc();
+  }
+  return *g_program_desc;
+}
+
 template <>
 AttrType AttrTypeID<int>() {
   return INT;
@@ -47,40 +56,44 @@ template <>
 AttrType AttrTypeID<std::vector<std::pair<int, int>>>() {
   return INT_PAIRS;
 }
+template <>
+AttrType AttrTypeID<BlockDesc>() {
+  return BLOCK;
+}
 
 Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
   switch (attr_desc.type()) {
-    case paddle::framework::AttrType::INT: {
+    case framework::AttrType::INT: {
       return attr_desc.i();
     }
-    case paddle::framework::AttrType::FLOAT: {
+    case framework::AttrType::FLOAT: {
      return attr_desc.f();
    }
-    case paddle::framework::AttrType::STRING: {
+    case framework::AttrType::STRING: {
      return attr_desc.s();
    }
-    case paddle::framework::AttrType::INTS: {
+    case framework::AttrType::INTS: {
      std::vector<int> val(attr_desc.ints_size());
      for (int i = 0; i < attr_desc.ints_size(); ++i) {
        val[i] = attr_desc.ints(i);
      }
      return val;
    }
-    case paddle::framework::AttrType::FLOATS: {
+    case framework::AttrType::FLOATS: {
      std::vector<float> val(attr_desc.floats_size());
      for (int i = 0; i < attr_desc.floats_size(); ++i) {
        val[i] = attr_desc.floats(i);
      }
      return val;
    }
-    case paddle::framework::AttrType::STRINGS: {
+    case framework::AttrType::STRINGS: {
      std::vector<std::string> val(attr_desc.strings_size());
      for (int i = 0; i < attr_desc.strings_size(); ++i) {
        val[i] = attr_desc.strings(i);
      }
      return val;
    }
-    case paddle::framework::AttrType::INT_PAIRS: {
+    case framework::AttrType::INT_PAIRS: {
      std::vector<std::pair<int, int>> val(attr_desc.int_pairs_size());
      for (int i = 0; i < attr_desc.int_pairs_size(); ++i) {
        val[i].first = attr_desc.int_pairs(i).first();
@@ -88,6 +101,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
      }
      return val;
    }
+    case framework::AttrType::BLOCK: {
+      return GetProgramDesc().mutable_blocks(attr_desc.block_idx());
+    }
  }
  PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
  return boost::blank();
...
@@ -29,11 +29,13 @@ namespace framework {
 typedef boost::variant<boost::blank, int, float, std::string, std::vector<int>,
                        std::vector<float>, std::vector<std::string>,
-                       std::vector<std::pair<int, int>>>
+                       std::vector<std::pair<int, int>>, BlockDesc*>
     Attribute;
 
 typedef std::unordered_map<std::string, Attribute> AttributeMap;
 
+ProgramDesc& GetProgramDesc();
+
 template <typename T>
 AttrType AttrTypeID();
...
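With `BlockDesc*` added to the `Attribute` variant, a block-typed attribute can be read back with `boost::get`. A minimal sketch, assuming an `attr_desc` whose `type()` is `BLOCK` and whose `block_idx` is valid in the global `ProgramDesc`:

.. code-block:: c++

    // Sketch only: round-trip a BLOCK attribute through the variant.
    // GetAttrValue returns GetProgramDesc().mutable_blocks(attr_desc.block_idx()),
    // so the variant then holds a BlockDesc* owned by the global ProgramDesc.
    paddle::framework::Attribute attr =
        paddle::framework::GetAttrValue(attr_desc);
    auto* block = boost::get<paddle::framework::BlockDesc*>(attr);
    (void)block;  // e.g. hand it to an operator that executes the sub-block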
@@ -166,9 +166,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
         // If part of input gradient of that operator is not calculated, fill
         // zero variables to that input gradient.
-        net->AppendOp(OpRegistry::CreateOp("fill_zeros_like",
-                                           {{"Src", {prefix}}},
-                                           {{"Dst", {grad_input}}}, {}));
+        net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}},
+                                           {{"Y", {grad_input}}}, {}));
       }
       return false;
     });
...
@@ -127,8 +127,8 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker {
  public:
   FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Src", "x");
-    AddOutput("Dst", "out");
+    AddInput("X", "x");
+    AddOutput("Y", "out");
     AddComment("");
   }
 };
@@ -325,10 +325,10 @@ TEST(Backward, op_part_of_output_are_not_need) {
   auto &fill_zero = *net->ops_[0];
   ASSERT_EQ("fill_zeros_like", fill_zero.Type());
-  ASSERT_EQ(1UL, fill_zero.Inputs("Src").size());
-  ASSERT_EQ("Z", fill_zero.Input("Src"));
-  ASSERT_EQ(1UL, fill_zero.Outputs("Dst").size());
-  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Dst"));
+  ASSERT_EQ(1UL, fill_zero.Inputs("X").size());
+  ASSERT_EQ("Z", fill_zero.Input("X"));
+  ASSERT_EQ(1UL, fill_zero.Outputs("Y").size());
+  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Y"));
 
   auto &d_many_out = *net->ops_[1];
   ASSERT_EQ("many_output_op_grad", d_many_out.Type());
...
@@ -23,6 +23,7 @@ enum AttrType {
   FLOATS = 4;
   STRINGS = 5;
   INT_PAIRS = 6;
+  BLOCK = 7;
 }
 
 message IntPair {
@@ -44,6 +45,7 @@ message OpDesc {
     repeated float floats = 7;
     repeated string strings = 8;
     repeated IntPair int_pairs = 9;
+    optional int32 block_idx = 10;
   };
 
   message Var {
@@ -108,3 +110,12 @@ message VarDesc {
   required string name = 1;
   optional LoDTensorDesc lod_tensor = 2;
 }
+
+message BlockDesc {
+  required int32 idx = 1;
+  required int32 parent_idx = 2;
+  repeated VarDesc vars = 3;
+  repeated OpDesc ops = 4;
+}
+
+message ProgramDesc { repeated BlockDesc blocks = 1; }
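The new messages can be exercised through the generated protobuf API. A minimal sketch (the header path, the `add_attrs`/`set_name` calls, and the attribute name are assumptions about parts of `OpDesc` not shown in this diff) that builds a program with one root block and an op whose attribute refers to that block by index:

.. code-block:: c++

    #include "paddle/framework/framework.pb.h"  // assumed generated header path

    int main() {
      paddle::framework::ProgramDesc program;
      auto* root = program.add_blocks();
      root->set_idx(0);
      root->set_parent_idx(-1);  // the root block has no parent

      // A BLOCK attribute stores an index into ProgramDesc.blocks,
      // not a pointer, so the serialized message stays self-contained.
      auto* op = root->add_ops();
      auto* attr = op->add_attrs();  // assumed: repeated Attr attrs field
      attr->set_name("sub_block");   // illustrative attribute name
      attr->set_type(paddle::framework::AttrType::BLOCK);
      attr->set_block_idx(root->idx());
      return 0;
    }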
@@ -207,23 +207,22 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
 }
 
 template <>
-Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
-  auto* var = OutputVar(name);
-  return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var));
+Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const {
+  auto var = OutputVar(name);
+  return var == nullptr ? nullptr : var->GetMutable<LoDTensor>();
 }
 
 template <>
-std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
     const std::string& name) const {
   auto names = op().Outputs(name);
   std::vector<Tensor*> res;
   res.reserve(names.size());
   std::transform(names.begin(), names.end(), std::back_inserter(res),
                  [&](const std::string& sub_name) {
-                   auto var = scope().FindVar(sub_name);
-                   return var == nullptr
-                              ? nullptr
-                              : const_cast<Tensor*>(GetTensorFromVar(var));
+                   auto var = scope_.FindVar(sub_name);
+                   return var == nullptr ? nullptr
+                                         : var->GetMutable<LoDTensor>();
                  });
   return res;
 }
...
@@ -212,9 +212,9 @@ class InferShapeContext {
     return res;
   }
 
-  std::vector<const Variable*> MultiOutputVar(const std::string& name) const {
+  std::vector<Variable*> MultiOutputVar(const std::string& name) const {
     auto names = op_.Outputs(name);
-    std::vector<const Variable*> res;
+    std::vector<Variable*> res;
     res.reserve(names.size());
     std::transform(names.begin(), names.end(), std::back_inserter(res),
                    [this](const std::string& name) {
@@ -271,6 +271,20 @@ class InferShapeContext {
     return &var->Get<Tensor>();
   }
 
+  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
+                size_t j = 0) const {
+    PADDLE_ENFORCE_LT(i, InputSize(in));
+    PADDLE_ENFORCE_LT(j, OutputSize(out));
+    auto* in_var = MultiInputVar(in)[i];
+    auto* out_var = MultiOutputVar(out)[j];
+    if (!in_var->IsType<LoDTensor>()) return;
+    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
+                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
+    auto in_tensor = in_var->Get<LoDTensor>();
+    auto* out_tensor = out_var->GetMutable<LoDTensor>();
+    out_tensor->set_lod(in_tensor.lod());
+  }
+
  private:
   const OperatorBase& op_;
   const Scope& scope_;
@@ -283,6 +297,13 @@ template <>
 const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
     const std::string& name) const;
 
+template <>
+Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const;
+
+template <>
+std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
+    const std::string& name) const;
+
 template <typename T>
 struct EigenDeviceConverter;
@@ -315,38 +336,10 @@ class ExecutionContext : public InferShapeContext {
     return device_context_;
   }
 
-  // redefine Output function,
-  // use Variable::Get instead of Variable::GetMutable
-  template <typename T>
-  T* Output(const std::string& name) const {
-    auto var = OutputVar(name);
-    return var == nullptr ? nullptr : const_cast<T*>(&var->Get<T>());
-  }
-
-  // redefine MultiOutput function.
-  // use Variable::Get instead of Variable::GetMutable
-  template <typename T>
-  std::vector<T*> MultiOutput(const std::string& name) const {
-    auto names = op().Outputs(name);
-    std::vector<T*> res;
-    res.reserve(names.size());
-    std::transform(
-        names.begin(), names.end(), std::back_inserter(res),
-        [&](const std::string& sub_name) { return Output<T>(sub_name); });
-    return res;
-  }
-
  private:
   const platform::DeviceContext& device_context_;
 };
 
-template <>
-Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
-
-template <>
-std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
-    const std::string& name) const;
-
 class OpKernel {
  public:
   /**
...
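The per-operator changes below all follow the same pattern: `InferShape` resizes the output through `Output<framework::Tensor>` (which now returns the variable's underlying `LoDTensor`), then forwards the input's LoD. A minimal sketch of that pattern for a hypothetical single-input, single-output operator:

.. code-block:: c++

    // Sketch of the InferShape pattern used by the operators below;
    // "MyOp" is illustrative, the two calls are the ones from this diff.
    class MyOp : public framework::OperatorWithKernel {
     public:
      using framework::OperatorWithKernel::OperatorWithKernel;

     protected:
      void InferShape(const framework::InferShapeContext &ctx) const override {
        ctx.Output<framework::Tensor>("Out")->Resize(
            ctx.Input<framework::Tensor>("X")->dims());
        // A no-op when "X" does not hold a LoDTensor; enforces that
        // "Out" is a LoDTensor when it does.
        ctx.ShareLoD("X", /*->*/ "Out");
      }
    };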
@@ -39,7 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0],
                       "inference size must be the same as label size");
 
-    ctx.Output<framework::LoDTensor>("Accuracy")->Resize({1});
+    ctx.Output<framework::Tensor>("Accuracy")->Resize({1});
+    ctx.ShareLoD("Inference", /*->*/ "Accuracy");
   }
 };
@@ -54,11 +55,15 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
   // TODO(typhoonzero): AddInput("Weight", ...
   AddOutput("Accuracy", "The accuracy of current batch");
 
-  AddComment(
-      R"DOC(Accuracy. It will print accuracy rate for classification.
+  AddComment(R"DOC(
+Accuracy. It will print accuracy rate for classification.
 The accuracy is:
 ..  math::
-accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})DOC");
+accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})
+
+Both the input `Inference` and `Label` can carry the LoD (Level of Details)
+information, or not. But the output only shares the LoD with input `Inference`.
+)DOC");
   }
 };
...
@@ -23,8 +23,9 @@ class ActivationOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>("Y")->Resize(
+    ctx.Output<framework::Tensor>("Y")->Resize(
         ctx.Input<framework::Tensor>("X")->dims());
+    ctx.ShareLoD("X", /*->*/ "Y");
   }
 };
@@ -34,7 +35,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<framework::Tensor>("Y")->dims());
   }
 };
...
@@ -33,7 +33,7 @@ class AddOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
                       ctx.Input<Tensor>("Y")->dims(),
                       "Two input of Add Op's dimension must be same.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         ctx.Input<Tensor>("X")->dims());
   }
 };
...
@@ -17,8 +17,6 @@
 namespace paddle {
 namespace operators {
 
-using framework::LoDTensor;
-
 class ClipOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -29,11 +27,12 @@ class ClipOp : public framework::OperatorWithKernel {
                    "Input(X) of ClipOp should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) of ClipOp should not be null.");
-    auto x_dims = ctx.Input<LoDTensor>("X")->dims();
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto max = Attr<float>("max");
     auto min = Attr<float>("min");
     PADDLE_ENFORCE_LT(min, max, "max should be greater than min.");
-    ctx.Output<LoDTensor>("Out")->Resize(x_dims);
+    ctx.Output<Tensor>("Out")->Resize(x_dims);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -66,8 +65,8 @@ class ClipOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
-    auto x_dims = ctx.Input<LoDTensor>("X")->dims();
-    auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     if (x_grad != nullptr) {
       x_grad->Resize(x_dims);
     }
...
@@ -29,7 +29,7 @@ class ConcatOp : public framework::OperatorWithKernel {
                             "Output(Out) of ConcatOp should not be null.");
 
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
     size_t n = ins.size();
...
@@ -37,7 +37,7 @@ class Conv2DOp : public framework::OperatorWithKernel {
     auto in = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
-    auto out = ctx.Output<framework::LoDTensor>("Output");
+    auto out = ctx.Output<framework::Tensor>("Output");
     std::vector<int> strides = Attr<std::vector<int>>("strides");
     std::vector<int> paddings = Attr<std::vector<int>>("paddings");
     int groups = Attr<int>("groups");
@@ -111,10 +111,9 @@ class Conv2DOpGrad : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto in = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
-    auto d_in =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Input"));
+    auto d_in = ctx.Output<framework::Tensor>(framework::GradVarName("Input"));
     auto d_filter =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Filter"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Filter"));
     if (d_in) d_in->Resize(in->dims());
     if (d_filter) d_filter->Resize(filter->dims());
   }
...
@@ -54,9 +54,10 @@ class CosSimOp : public framework::OperatorWithKernel {
                    " just 1 (which will be broadcasted to match Input(X)).");
 
     // resize tensor
-    ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1});
-    ctx.Output<framework::LoDTensor>("XNorm")->Resize({x_dims[0], 1});
-    ctx.Output<framework::LoDTensor>("YNorm")->Resize({y_dims[0], 1});
+    ctx.Output<framework::Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.Output<framework::Tensor>("XNorm")->Resize({x_dims[0], 1});
+    ctx.Output<framework::Tensor>("YNorm")->Resize({y_dims[0], 1});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -81,10 +82,13 @@ Cosine Similarity Operator.
 The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
 
-Input(X) and Input(Y) must have the same shape, except that the 1st dimension
-of Input(Y) could be just 1 (different from Input(X)), which will be
-broadcasted to match the shape of Input(X) before computing their cosine
+The input `X` and `Y` must have the same shape, except that the 1st dimension
+of input `Y` could be just 1 (different from input `X`), which will be
+broadcasted to match the shape of input `X` before computing their cosine
 similarity.
+
+Both the input `X` and `Y` can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input `X`.
 )DOC");
   }
 };
@@ -139,10 +143,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
                    "Shape of Input(Out@Grad) must be [X.Dim(0), 1].");
 
     // resize tensor
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
     if (y_grad) y_grad->Resize(y_dims);
   }
...
@@ -19,7 +19,6 @@ namespace paddle {
 namespace operators {
 
 using framework::Tensor;
-using framework::LoDTensor;
 
 class CropOp : public framework::OperatorWithKernel {
  public:
@@ -31,9 +30,9 @@ class CropOp : public framework::OperatorWithKernel {
                    "Input(X) of CropOp should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) of CropOp should not be null.");
-    auto x_dim = ctx.Input<LoDTensor>("X")->dims();
-    auto *y = ctx.Input<LoDTensor>("Y");
-    auto *out = ctx.Output<LoDTensor>("Out");
+    auto x_dim = ctx.Input<Tensor>("X")->dims();
+    auto *y = ctx.Input<Tensor>("Y");
+    auto *out = ctx.Output<Tensor>("Out");
     if (y == nullptr) {
       auto shape = Attr<std::vector<int>>("shape");
       PADDLE_ENFORCE_EQ(
@@ -121,8 +120,8 @@ class CropOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
-    auto x_dims = ctx.Input<LoDTensor>("X")->dims();
-    auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     if (x_grad != nullptr) {
       x_grad->Resize(x_dims);
     }
...
@@ -17,8 +17,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using framework::LoDTensor;
-
 class CrossEntropyOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -51,7 +49,8 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
                         "Input(Label) must be 1.");
     }
 
-    ctx.Output<LoDTensor>("Y")->Resize({x->dims()[0], 1});
+    ctx.Output<Tensor>("Y")->Resize({x->dims()[0], 1});
+    ctx.ShareLoD("X", /*->*/ "Y");
   }
 };
@@ -95,7 +94,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {
                         "Input(Label) must be 1.");
     }
 
-    auto dx = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto dx = ctx.Output<Tensor>(framework::GradVarName("X"));
     dx->Resize(x->dims());
   }
 };
@@ -133,6 +132,9 @@ computation.
 As a special case of 2), when each row of Input(Label) has only one
 non-zero element (equals 1), soft-label cross-entropy degenerates to a
 one-hot cross-entropy with one-hot label representation.
+
+Both the input `X` and `Label` can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input `X`.
 )DOC");
   }
 };
...
@@ -18,7 +18,6 @@ namespace paddle {
 namespace operators {
 
 using framework::Tensor;
-using framework::LoDTensor;
 
 class DropoutOp : public framework::OperatorWithKernel {
  public:
@@ -34,10 +33,11 @@ class DropoutOp : public framework::OperatorWithKernel {
                    ctx.Attr<int>("is_training") == 1);
 
     auto dims = ctx.Input<Tensor>("X")->dims();
-    ctx.Output<LoDTensor>("Out")->Resize(dims);
+    ctx.Output<Tensor>("Out")->Resize(dims);
     if (ctx.Attr<int>("is_training") == 1) {
-      ctx.Output<LoDTensor>("Mask")->Resize(dims);
+      ctx.Output<Tensor>("Mask")->Resize(dims);
     }
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -96,7 +96,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(x_dims, mask_dims,
                       "Dimensions of Input(X) and Mask must be the same.");
 
-    auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     x_grad->Resize(x_dims);
   }
 };
...
@@ -37,7 +37,8 @@ class ElementWiseMulOp : public framework::OperatorWithKernel {
     auto y_dim = ctx.Input<Tensor>("Y")->dims();
     PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
                       "Rank of first input must >= rank of second input.")
-    ctx.Output<framework::LoDTensor>("Out")->Resize(x_dim);
+    ctx.Output<framework::Tensor>("Out")->Resize(x_dim);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -63,11 +64,15 @@ Limited elementwise multiple operator.The equation is: Out = X ⊙ Y.
 2. Y's shape is a subset of X.
    Y will be broadcasted to match the shape of X and axis should be dimension index Y in X.
 
    example:
      shape(X) = (2, 3, 4, 5), shape(Y) = (,)
      shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
      shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
      shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
      shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
+
+Both the input X and Y can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input X.
 )DOC");
   }
 };
@@ -86,10 +91,8 @@ class ElementWiseMulOpGrad : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
                       "Rank of first input must >= rank of second input.")
...
@@ -186,6 +186,9 @@ W_i is a 2-D matrix of size (K x N), where N means the number of neurons
 in the fully connected layer. B is a 1-D vector of size N.
 Thus, the output Out is a 2-D matrix of size (M x N).
 Activation type can be set to `identity` (default), `sigmoid` or `softmax`.
+
+All the inputs can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with first input (`X[0]`).
 )DOC");
   }
 };
...
@@ -23,15 +23,14 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(
-        ctx.InputVar("Src"),
-        "Input(Src) of FillZerosLikeOp should not be null.");
-    PADDLE_ENFORCE_NOT_NULL(
-        ctx.OutputVar("Dst"),
-        "Output(Dst) of FillZerosLikeOp should not be null.");
-
-    ctx.Output<framework::LoDTensor>("Dst")->Resize(
-        ctx.Input<framework::Tensor>("Src")->dims());
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of FillZerosLikeOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
+                            "Output(Y) of FillZerosLikeOp should not be null.");
+
+    ctx.Output<framework::Tensor>("Y")->Resize(
+        ctx.Input<framework::Tensor>("X")->dims());
+    ctx.ShareLoD("X", /*->*/ "Y");
   }
 };
@@ -40,8 +39,8 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker {
   FillZerosLikeOpMaker(framework::OpProto *proto,
                        framework::OpAttrChecker *op_checker)
       : framework::OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Src", "The input of fill-zeros-like op.");
-    AddOutput("Dst", "The varibale will be filled up with zeros.");
+    AddInput("X", "The input of fill-zeros-like op.");
+    AddOutput("Y", "The varibale will be filled up with zeros.");
     AddComment(R"DOC(
 Fill up a vriable with zeros.
...
@@ -23,7 +23,7 @@ template <typename Place, typename T>
 class FillZerosLikeKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* output = context.Output<framework::Tensor>("Dst");
+    auto* output = context.Output<framework::Tensor>("Y");
     output->mutable_data<T>(context.GetPlace());
     auto t = framework::EigenVector<T>::Flatten(*output);
     t.device(context.GetEigenDevice<Place>()) = t.constant(static_cast<T>(0));
...
@@ -35,7 +35,7 @@ class GatherOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0");
     framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
     output_dims[0] = batch_size;
-    ctx.Output<framework::LoDTensor>("Out")->Resize(output_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(output_dims);
   }
 };
@@ -45,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    auto X_grad = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto X_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     auto X = ctx.Input<Tensor>("X");
 
     X_grad->Resize(X->dims());
...
@@ -48,7 +48,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
         ctx.OutputVar("Out"),
         "Output(Out) of GaussianRandomOp should not be null.");
 
-    auto* tensor = ctx.Output<framework::LoDTensor>("Out");
+    auto* tensor = ctx.Output<framework::Tensor>("Out");
     auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
...
@@ -32,9 +32,10 @@ class LookupTableOp : public framework::OperatorWithKernel {
     auto table_t = ctx.Input<Tensor>("W");
     auto ids_t = ctx.Input<Tensor>("Ids");
-    auto output_t = ctx.Output<framework::LoDTensor>("Out");
+    auto output_t = ctx.Output<framework::Tensor>("Out");
 
     output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
+    ctx.ShareLoD("Ids", /*->*/ "Out");
   }
 };
@@ -50,9 +51,13 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
              "An input with type int32 or int64"
              "contains the ids to be looked up in W.");
     AddOutput("Out", "The lookup results, which have the same type with W.");
-    AddComment(
-        "This operator is used to perform lookups on the parameter W,"
-        "then concatenated into a dense tensor.");
+    AddComment(R"DOC(
+This operator is used to perform lookups on the parameter W,
+then concatenated into a dense tensor.
+
+The input `Ids` can carry the LoD (Level of Details) information,
+or not. And the output only shares the LoD with input `Ids`.
+)DOC");
   }
 };
@@ -64,7 +69,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &context) const override {
     auto table = context.Input<Tensor>("W");
     auto d_table =
-        context.Output<framework::LoDTensor>(framework::GradVarName("W"));
+        context.Output<framework::Tensor>(framework::GradVarName("W"));
     d_table->Resize(table->dims());
   }
 };
...
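Conceptually, the lookup described above selects one row of W per id, so the output gets shape {Ids.dims()[0], W.dims()[1]}, matching the `Resize` call. An illustrative sketch of that semantics on plain vectors (the name and signature are not the kernel's API):

.. code-block:: c++

    #include <cstdint>
    #include <vector>

    // Out[i] = W[Ids[i]]: copy the row of `table` selected by each id.
    // `width` is W.dims()[1]; the result has ids.size() * width elements.
    std::vector<float> LookupRows(const std::vector<float>& table,
                                  int64_t width,
                                  const std::vector<int64_t>& ids) {
      std::vector<float> out(ids.size() * width);
      for (size_t i = 0; i < ids.size(); ++i) {
        for (int64_t j = 0; j < width; ++j) {
          out[i * width + j] = table[ids[i] * width + j];
        }
      }
      return out;
    }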
@@ -27,7 +27,7 @@ class MeanOp : public framework::OperatorWithKernel {
                    "Input(X) of MeanOp should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) of MeanOp should not be null.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize({1});
+    ctx.Output<framework::Tensor>("Out")->Resize({1});
   }
 };
@@ -37,7 +37,8 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The input of mean op");
     AddOutput("Out", "The output of mean op").NotInGradient();
-    AddComment("Mean Operator");
+    AddComment(R"DOC( Mean Operator
+)DOC");
   }
 };
@@ -47,7 +48,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
@@ -40,7 +40,8 @@ class MinusOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         left_tensor->numel(), right_tensor->numel(),
         "Minus operator must take two tensor with same num of elements");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(left_tensor->dims());
+    ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims());
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -54,7 +55,12 @@ class MinusOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(Minus Operator
 
-Equation: Out = X - Y
+Equation:
+
+    Out = X - Y
+
+Both the input `X` and `Y` can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input `X`.
 )DOC");
   }
 };
...
@@ -34,8 +34,8 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(x->dims().size(), 2, "The tensor rank of X must be 2.");
     PADDLE_ENFORCE_EQ(x->dims()[1], 1, "The 2nd dimension of X must be 1.");
 
-    context.Output<framework::LoDTensor>("IntermediateVal")->Resize(x->dims());
-    context.Output<framework::LoDTensor>("Out")->Resize({x->dims()[0], 1});
+    context.Output<framework::Tensor>("IntermediateVal")->Resize(x->dims());
+    context.Output<framework::Tensor>("Out")->Resize({x->dims()[0], 1});
   }
 };
@@ -81,7 +81,7 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     auto* intermediate_val = context.Input<Tensor>("IntermediateVal");
     auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* x_grad =
-        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
+        context.Output<framework::Tensor>(framework::GradVarName("X"));
 
     PADDLE_ENFORCE_NOT_NULL(x, "X must be initialized.");
     PADDLE_ENFORCE_NOT_NULL(y, "Y must be initialized.");
...
@@ -52,8 +52,8 @@ class ModifiedHuberLossKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("X");
     auto* in1 = context.Input<Tensor>("Y");
-    auto* out0 = context.Output<framework::LoDTensor>("IntermediateVal");
-    auto* out1 = context.Output<framework::LoDTensor>("Out");
+    auto* out0 = context.Output<framework::Tensor>("IntermediateVal");
+    auto* out1 = context.Output<framework::Tensor>("Out");
 
     out0->mutable_data<T>(context.GetPlace());
     out1->mutable_data<T>(context.GetPlace());
@@ -77,11 +77,9 @@ class ModifiedHuberLossGradCPUKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("Y");
-    auto* in1 = context.Input<framework::LoDTensor>("IntermediateVal");
-    auto* in2 =
-        context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
-    auto* out0 =
-        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto* in1 = context.Input<framework::Tensor>("IntermediateVal");
+    auto* in2 = context.Input<framework::Tensor>(framework::GradVarName("Out"));
+    auto* out0 = context.Output<framework::Tensor>(framework::GradVarName("X"));
 
     if (out0) {
       const T* y_ptr = in0->data<T>();
...
@@ -18,7 +18,6 @@ namespace paddle {
 namespace operators {
 
 using framework::Tensor;
-using framework::LoDTensor;
 
 class MulOp : public framework::OperatorWithKernel {
  public:
@@ -53,8 +52,9 @@ class MulOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         x_mat_dims[1], y_mat_dims[0],
         "First matrix's width must be equal with second matrix's height.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         {x_mat_dims[0], y_mat_dims[1]});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -83,9 +83,14 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(1)
         .EqualGreaterThan(1);
     AddComment(R"DOC(
-Two Element Mul Operator.
+Mul operator is used to perform matrix multiplication for input X and Y.
 
-The equation is: Out = X * Y
+The equation is:
+
+    Out = X * Y
+
+Both the input `X` and `Y` can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input `X`.
 )DOC");
   }
 };
@@ -103,10 +108,8 @@ class MulOpGrad : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     auto x_mat_dims =
         framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
...
@@ -39,8 +39,13 @@ class PadOp : public framework::OperatorWithKernel {
     for (int i = 0; i < x_dim.size(); ++i) {
       out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
     }
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         framework::make_ddim(out_dims));
+    if (out_dims[0] == x_dim[0]) {
+      // Only pass LoD when the first dimension is equal between
+      // output and input.
+      ctx.ShareLoD("X", /*->*/ "Out");
+    }
   }
 };
@@ -101,7 +106,7 @@ class PadOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
-    auto *x_g = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *x_g = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     if (x_g != nullptr) {
       x_g->Resize(x_dims);
     }
...
@@ -36,8 +36,9 @@ class PReluOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) should not be null");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     out->Resize(in->dims());
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -55,6 +56,8 @@ The equation is:
   f(x) = alpha * x , for x < 0
   f(x) = x , for x >= 0
 
+The input `X` can carry the LoD (Level of Details) information,
+or not. And the output shares the LoD with input `X`.
 )DOC");
   }
 };
@@ -69,11 +72,11 @@ class PReluGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
-    auto *dx = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     auto *x = ctx.Input<framework::Tensor>("X");
 
     auto *dalpha =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Alpha"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Alpha"));
     auto *alpha = ctx.Input<framework::Tensor>("Alpha");
 
     dx->Resize(x->dims());
...
@@ -40,7 +40,7 @@ class RankLossOp : public framework::OperatorWithKernel {
                    "All inputs must have the same size");
     PADDLE_ENFORCE((label_dims.size() == 2) && (label_dims[1] == 1),
                    "All inputs must be row vector with size batch_size x 1.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(label_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(label_dims);
   }
 };
@@ -102,9 +102,9 @@ class RankLossGradOp : public framework::OperatorWithKernel {
                    "Input(Out@GRAD) shouldn't be null.");
     auto dims = ctx.Input<framework::Tensor>("Left")->dims();
     auto *left_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Left"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Left"));
     auto *right_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Right"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Right"));
     if (left_grad) {
       left_grad->Resize(dims);
     }
...
@@ -24,7 +24,7 @@ template <typename Place, typename T>
 class RankLossKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const {
-    auto* out_t = ctx.Output<framework::LoDTensor>("Out");
+    auto* out_t = ctx.Output<framework::Tensor>("Out");
     auto* label_t = ctx.Input<framework::Tensor>("Label");
     auto* left_t = ctx.Input<framework::Tensor>("Left");
     auto* right_t = ctx.Input<framework::Tensor>("Right");
@@ -46,9 +46,9 @@ class RankLossGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const {
     auto* d_left_t =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Left"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Left"));
     auto* d_right_t =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Right"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Right"));
     auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
     auto* label_t = ctx.Input<framework::Tensor>("Label");
...
@@ -50,7 +50,12 @@ class ReshapeOp : public framework::OperatorWithKernel {
     std::transform(shape.begin(), shape.end(), shape_int64.begin(),
                    [](int a) { return static_cast<int64_t>(a); });
     auto out_dims = framework::make_ddim(shape_int64);
-    ctx.Output<framework::LoDTensor>("Out")->Resize(out_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
+    if (shape[0] == in->dims()[0]) {
+      // Only pass LoD when the first dimension is equal between
+      // output and input.
+      ctx.ShareLoD("X", /*->*/ "Out");
+    }
   }
 };
@@ -94,7 +99,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) shouldn't be null.");
     auto dims = ctx.Input<framework::Tensor>("X")->dims();
-    auto *d_in = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *d_in = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     d_in->Resize(dims);
   }
 };
...
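The reshape hunk above shows, in one place, the pattern this merge applies to every operator below: InferShape fetches outputs as plain framework::Tensor, and LoD propagation becomes an explicit ctx.ShareLoD call instead of being implied by Output<framework::LoDTensor>. A minimal sketch of the pattern, assuming a hypothetical IdentityLikeOp that is not part of this diff:

// Sketch only; IdentityLikeOp and its X/Out names are hypothetical.
class IdentityLikeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    auto *in = ctx.Input<framework::Tensor>("X");
    // Outputs are fetched as plain Tensors; LoD no longer rides along
    // through the template argument.
    auto *out = ctx.Output<framework::Tensor>("Out");
    out->Resize(in->dims());
    // LoD propagation is opt-in. It is safe here because Out keeps one
    // row per row of X; ops that change the first dimension (see the
    // reshape guard above) must gate or skip this call.
    ctx.ShareLoD("X", /*->*/ "Out");
  }
};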
@@ -44,7 +44,8 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
         framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
         "The width of two operands must be same");
     PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(x_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(x_dims);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -83,8 +84,8 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
         "The width of two operands must be same");
-    auto *dx = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *db = ctx.Output<framework::LoDTensor>(framework::GradVarName("b"));
+    auto *dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *db = ctx.Output<framework::Tensor>(framework::GradVarName("b"));
     if (dx) dx->Resize(x_dims);
     if (db) db->Resize(b_dims);
   }
...
@@ -33,8 +33,9 @@ class ScaleOp : public framework::OperatorWithKernel {
                             "Output(Out) of ScaleOp should not be null.");
     auto *in = ctx.Input<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     out->Resize(in->dims());
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
...
@@ -44,7 +44,7 @@ class ScatterOp : public framework::OperatorWithKernel {
     framework::DDim data_dim(ctx.Input<Tensor>("Updates")->dims());
     for (int i = 1; i < data_dim.size(); ++i)
       PADDLE_ENFORCE_EQ(data_dim[i], ctx.Input<Tensor>("Updates")->dims()[i]);
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         ctx.Input<Tensor>("Ref")->dims());
   }
 };
@@ -56,10 +56,9 @@ class ScatterGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto *dUpdates =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Updates"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Updates"));
     auto *Updates = ctx.Input<Tensor>("Updates");
-    auto *dRef =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Ref"));
+    auto *dRef = ctx.Output<framework::Tensor>(framework::GradVarName("Ref"));
     auto *Ref = ctx.Input<Tensor>("Ref");
     dRef->Resize(Ref->dims());
...
@@ -33,7 +33,7 @@ class SGDOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("param")->dims(),
                       ctx.Input<Tensor>("grad")->dims(),
                       "Two input of SGD Op's dimension must be same.");
-    ctx.Output<framework::LoDTensor>("param_out")
+    ctx.Output<framework::Tensor>("param_out")
         ->Resize(ctx.Input<Tensor>("param")->dims());
   }
 };
...
@@ -44,8 +44,8 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
                         "The shape of OutsideWeight must be same as X.");
     }
-    auto* diff = ctx.Output<framework::LoDTensor>("Diff");
-    auto* out = ctx.Output<framework::LoDTensor>("Out");
+    auto* diff = ctx.Output<framework::Tensor>("Diff");
+    auto* out = ctx.Output<framework::Tensor>("Out");
     diff->Resize(x->dims());
     // loss is a two-rank tensor
     out->Resize({x->dims()[0], 1});
@@ -103,10 +103,8 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
     auto in_dims = ctx.Input<framework::Tensor>("X")->dims();
     auto out_dims =
         ctx.Input<framework::Tensor>(framework::GradVarName("Out"))->dims();
-    auto* x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto* y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     PADDLE_ENFORCE_GE(out_dims.size(), 2,
                       "The tensor rank of Input(Out@Grad) should be 2.");
...
@@ -30,8 +30,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                    "The input of softmax op must be a matrix.");
-    ctx.Output<framework::LoDTensor>("Y")->Resize(
-        ctx.Input<Tensor>("X")->dims());
+    ctx.Output<framework::Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
@@ -77,7 +76,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
         ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
         "Input(Y) and its gradients should have a same shape.");
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
@@ -27,7 +27,7 @@ class SplitOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     // infershape
     auto *in = ctx.Input<framework::Tensor>("X");
-    auto outs = ctx.MultiOutput<framework::LoDTensor>("Out");
+    auto outs = ctx.MultiOutput<framework::Tensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
     size_t num = static_cast<size_t>(ctx.Attr<int>("num"));
     std::vector<int> sections =
...
@@ -54,9 +54,10 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
                       "First dimension of target must be equal to input "
                       "or to 1.");
-    ctx.Output<framework::LoDTensor>("sub_result")
+    ctx.Output<framework::Tensor>("sub_result")
         ->Resize({x_dims[0], x->numel() / x_dims[0]});
-    ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1});
+    ctx.Output<framework::Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -79,6 +80,9 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
     input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp
     will broadcast target's first dimension to input's first dimension.
     You can decide whether calculate the gradient of input and target.
+
+    Both the input X and Y can carry the LoD (Level of Details) information,
+    or not. But the output only shares the LoD with input X.
 )DOC");
   }
 };
@@ -100,10 +104,8 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(out_dims[1], 1,
                       "Second dimension of output gradient "
                       "must be 1.");
-    auto* x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto* y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
     if (y_grad) y_grad->Resize(y_dims);
   }
...
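For reference, LoD (Level of Details) records how the rows of a tensor group into sequences. The layout below is an assumption based on the offset-vector convention Paddle used at this time, not something stated in this diff:

// Assumed illustration: a LoD of {{0, 2, 5}} over a 5-row tensor marks two
// sequences, rows [0, 2) and [2, 5).
framework::LoD lod{{0, 2, 5}};
// ShareLoD("X", /*->*/ "Out") copies exactly this grouping from X to Out.
// That stays valid for SquaredL2DistanceOp because Out is resized to
// {x_dims[0], 1}, i.e. it keeps one row per row of X.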
@@ -28,7 +28,7 @@ class SumOp : public framework::OperatorWithKernel {
                             "Output(Out) of SumOp should not be null.");
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     int N = ins.size();
     auto in_dim = ins[0]->dims();
@@ -39,6 +39,7 @@ class SumOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
     }
     out->Resize(in_dim);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -49,8 +50,11 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "the input tensors of sum operator.").AsDuplicable();
     AddOutput("Out", "the output tensor of sum operator.");
     AddComment(R"DOC(
 Sum the input tensors.
+
+All the inputs can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with the first input.
 )DOC");
   }
 };
@@ -61,7 +65,7 @@ class SumGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto outputs =
-        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
+        ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
     auto dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
     for (auto output : outputs) {
       output->Resize(dims);
...
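Because Sum's input X is duplicable, ctx.ShareLoD("X", "Out") can only pick one donor, and the amended DOC pins that down as the first input. The same wording can serve as a template for other multi-input ops; in the sketch below, MeanOpMaker and its op are hypothetical, and only the DOC convention is taken from the SumOp change above:

// Hypothetical maker, not part of this diff.
class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "the input tensors of mean operator.").AsDuplicable();
    AddOutput("Out", "the output tensor of mean operator.");
    AddComment(R"DOC(
Average the input tensors.

All the inputs can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD with the first input.
)DOC");
  }
};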
@@ -40,8 +40,8 @@ class TopkOp : public framework::OperatorWithKernel {
     framework::DDim dims = input->dims();
     dims[dims.size() - 1] = k;
-    ctx.Output<framework::LoDTensor>("Out")->Resize(dims);
-    ctx.Output<framework::LoDTensor>("Indices")->Resize(dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(dims);
+    ctx.Output<framework::Tensor>("Indices")->Resize(dims);
   }
 };
...
@@ -51,7 +51,7 @@ class TransposeOp : public framework::OperatorWithKernel {
     for (size_t i = 0; i < axis_size; i++) {
       out_dims[i] = x_dims[axis[i]];
     }
-    ctx.Output<framework::LoDTensor>("Out")->Resize(out_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
   }
 };
@@ -99,8 +99,7 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     if (x_grad) x_grad->Resize(x_dims);
   }
...
@@ -54,7 +54,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
-    auto* tensor = ctx.Output<framework::LoDTensor>("Out");
+    auto* tensor = ctx.Output<framework::Tensor>("Out");
     auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
...
@@ -6,8 +6,8 @@ from op_test import OpTest
 class TestFillZerosLikeOp(OpTest):
     def setUp(self):
         self.op_type = "fill_zeros_like"
-        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
-        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}
+        self.inputs = {'X': np.random.random((219, 232)).astype("float32")}
+        self.outputs = {'Y': np.zeros_like(self.inputs["X"])}
 
     def test_check_output(self):
         self.check_output()
...