提交 901b0411 编写于 作者: W wanghaoshuang

Add seq_expand op

1. Add unit test
2. Add SeqExpandOpKernel
上级 bb81baa1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/seq_expand_op.h"
namespace paddle {
namespace operators {
using framework::Tensor;
class SeqExpandOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // Infers the shape of Out.
  // If attribute `repeat` > 0, Out has `repeat` copies of every row of X;
  // otherwise the shape (and LoD) come from the reference input Y.
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SeqExpandOp should not be null.");
    // Check the output before using it in either branch (the original only
    // verified it after the repeat branch and referenced an undefined
    // `y_dim`).
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SeqExpandOp should not be null.");
    int repeat = ctx->Attrs().Get<int>("repeat");
    framework::DDim out_dim;
    if (repeat == 0) {
      // repeat == 0 means expansion is driven by Y's LoD.
      PADDLE_ENFORCE(
          ctx->HasInput("Y"),
          "Input(Y) of SeqExpandOp should not be null while repeat == 0.");
      out_dim = ctx->GetInputDim("Y");
      ctx->ShareLoD("Y", "Out");
    } else {
      // Fixed repeat factor: every row of X appears `repeat` times in Out.
      out_dim = ctx->GetInputDim("X");
      out_dim[0] = out_dim[0] * repeat;
    }
    ctx->SetOutputDim("Out", out_dim);
  }
};
// Declares the inputs, outputs, attributes and documentation of seq_expand.
// NOTE: the Add* call order defines the op proto layout, so it is kept
// exactly as-is.
class SeqExpandOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SeqExpandOpMaker(framework::OpProto* proto,
                   framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    // TODO(wanghaoshuang): Add more comments
    AddInput("X", "The input('X') of seq_expand op.");
    // Y is consulted only when attribute `repeat` is 0; its shape/LoD then
    // drive the expansion (see SeqExpandOp::InferShape).
    AddInput("Y", "The reference input('Y') of seq_expand op.");
    AddOutput("Out", "The output of seq_expand op.");
    // Default 0 means "use Y's LoD" instead of a fixed repeat factor.
    AddAttr<int>("repeat", "repeat times").SetDefault(0);
    AddComment(R"DOC(
As an example:
Given:
X = [1, 2 , 3]
and
repeat = 2
then we get
Out.data = [1, 1, 2, 2, 3, 3]
Out.lod = [[0, 2, 4, 6]]
)DOC");
  }
};
class SeqExpandOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // The gradient w.r.t. X has exactly the shape of X.
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null");
    auto x_grad_name = framework::GradVarName("X");
    if (!ctx->HasOutput(x_grad_name)) {
      return;  // X@GRAD not requested; nothing to infer.
    }
    ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
  }
};
// Builds the descriptor of the seq_expand_grad op from the forward op.
class SeqExpandOpGradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDescBind> Apply() const override {
    // Own the descriptor from the start instead of holding a naked `new`
    // pointer until the return statement (exception-safe).
    std::unique_ptr<framework::OpDescBind> bind(new framework::OpDescBind());
    bind->SetType("seq_expand_grad");
    bind->SetInput("X", Input("X"));
    bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    bind->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    bind->SetAttrMap(Attrs());
    return bind;
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the forward op together with its maker and grad-op descriptor
// maker, then the grad op itself.
REGISTER_OPERATOR(seq_expand, ops::SeqExpandOp, ops::SeqExpandOpMaker,
                  ops::SeqExpandOpGradMaker);
REGISTER_OPERATOR(seq_expand_grad, ops::SeqExpandOpGrad);
// CPU kernels (float only for now).
REGISTER_OP_CPU_KERNEL(seq_expand,
                       ops::SeqExpandKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    seq_expand_grad,
    ops::SeqExpandGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/seq_expand_op.h"

namespace ops = paddle::operators;
// GPU kernels (float only for now); kernel bodies live in seq_expand_op.h.
REGISTER_OP_GPU_KERNEL(seq_expand,
                       ops::SeqExpandKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    seq_expand_grad,
    ops::SeqExpandGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <cstring>      // memcpy for host-side row copies
#include <type_traits>  // std::is_same for Place dispatch
#include <vector>

#include "hl_cuda.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
using LoD = paddle::framework::LoD;
// Expands each row of X into one or more consecutive rows of Out, either
// by a fixed `repeat` factor or according to Out's LoD.
template <typename Place, typename T>
class SeqExpandKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");
    const T* x_data = x->data<T>();
    T* out_data = out->mutable_data<T>(context.GetPlace());
    size_t repeat = static_cast<size_t>(context.Attr<int>("repeat"));

    if (repeat != 0 && x->lod().size() == 0) {
      // X carries no LoD: synthesize a level-0 LoD where row i of X maps to
      // rows [i * repeat, (i + 1) * repeat) of Out.
      // (The original pre-sized the vector AND push_back'ed into it, doubling
      // its length, and looped with `i <= dims()[0]` on a pre-sized buffer.)
      size_t x_rows = static_cast<size_t>(x->dims()[0]);
      std::vector<size_t> level0;
      level0.reserve(x_rows + 1);
      for (size_t i = 0; i <= x_rows; ++i) {
        level0.push_back(i * repeat);
      }
      LoD out_lod;  // must be mutable to be filled (was declared const)
      out_lod.push_back(level0);
      out->set_lod(out_lod);
    }

    auto out_dim = out->dims();
    size_t element_len = framework::product(out_dim) / out_dim[0];
    // cpy_map[j] == i  <=>  output row j is a copy of input row i.
    std::vector<int> cpy_map(out_dim[0]);
    if (x->lod().size() == 0) {
      auto lod = out->lod();
      // Walk the segments of LoD level 0 (the original iterated
      // `lod.size() - 1` levels while indexing lod[0], and its inner loop
      // tested `i < lod[0][i + 1]` instead of `j`, never terminating).
      for (size_t i = 0; i + 1 < lod[0].size(); ++i) {
        for (size_t j = lod[0][i]; j < lod[0][i + 1]; ++j) {
          cpy_map[j] = static_cast<int>(i);
        }
      }
    }
    // Dispatch on the Place *type*; the original compared a CPUPlace value
    // against the template parameter, which does not compile.
    if (std::is_same<Place, paddle::platform::CPUPlace>::value) {
      for (int i = 0; i < out_dim[0]; ++i) {
        memcpy(out_data + element_len * i, x_data + element_len * cpy_map[i],
               sizeof(T) * element_len);
      }
    } else {
      for (int i = 0; i < out_dim[0]; ++i) {
        hl_memcpy(out_data + element_len * i, x_data + element_len * cpy_map[i],
                  sizeof(T) * element_len);
      }
    }
  }
};
// Backward kernel of seq_expand.
// TODO(wanghaoshuang): not implemented yet — Compute is currently a no-op
// and the intended logic is sketched in the commented-out lines below.
template <typename Place, typename T>
class SeqExpandGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
    // auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
    // d_x->mutable_data<T>(context.GetPlace());
  }
};
} // namespace operators
} // namespace paddle
import unittest
import numpy as np
from op_test import OpTest
class TestSeqExpand(OpTest):
    """Unit test for the seq_expand operator.

    Builds inputs in set_data(), computes the expected output with a numpy
    reference implementation in compute(), then relies on OpTest for the
    forward/backward checks.
    """

    def set_data(self):
        # Default case: expansion driven by the LoD of reference input Y
        # (self.repeat is None, so compute() takes the Y branch).
        self.op_type = 'seq_expand'
        x = np.random.uniform(0.1, 1, [3, 2, 2]).astype('float32')
        y = np.zeros((6, 2, 2)).astype('float32')
        lod = [[0, 2, 3, 6]]
        self.inputs = {'X': x, 'Y': (y, lod)}
        self.repeat = None

    def compute(self):
        # Reference implementation: removed the Python-2 `print` statements
        # (syntax errors under Python 3) and the unused `start` variable.
        x = self.inputs['X']
        cpy_map = {}
        lod = []
        if self.repeat:
            # Fixed repeat: row i of x fills output rows
            # [i * repeat, (i + 1) * repeat).
            level0 = [i * self.repeat for i in range(x.shape[0] + 1)]
            lod.append(level0)
            out_shape = list(x.shape)
            out_shape[0] *= self.repeat
        else:
            # LoD-driven: output shape and LoD come from reference input Y.
            y, lod = self.inputs['Y']
            out_shape = y.shape
        out = np.zeros(out_shape).astype('float32')
        # cpy_map[j] == i  <=>  output row j is a copy of input row i.
        for i in range(len(lod[0]) - 1):
            for j in range(lod[0][i], lod[0][i + 1]):
                cpy_map[j] = i
        for i in range(len(out)):
            out[i] = x[cpy_map[i]]
        self.outputs = {'Out': (out, lod)}

    def setUp(self):
        self.set_data()
        self.compute()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
if __name__ == '__main__':
    # Run all TestSeqExpand cases when executed directly.
    unittest.main()
# TestSeqExpand().setUp()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册