Commit 74dd8366 authored by wangliu

Merge remote-tracking branch 'upstream/develop' into develop

@@ -84,7 +84,7 @@ struct Variant {
     if (type_id == typeid(T).hash_code()) {
       return *const_cast<T *>(reinterpret_cast<const T *>(&data));
     } else {
-      PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant ");
+      PADDLE_MOBILE_THROW_EXCEPTION(" bad cast in variant");
       exit(0);
     }
   }
......
@@ -42,8 +42,17 @@ class FusionOpRegister {
     matchers_[matcher->Type()] = shared_matcher;
   }
-  const std::map<std::string, std::shared_ptr<FusionOpMatcher>> Matchers() {
-    return matchers_;
+  const std::vector<std::shared_ptr<FusionOpMatcher>> Matchers() {
+    std::vector<std::shared_ptr<FusionOpMatcher>> matchers;
+    for (const auto& match : matchers_) {
+      matchers.push_back(match.second);
+    }
+    std::sort(matchers.begin(), matchers.end(),
+              [](std::shared_ptr<FusionOpMatcher> first,
+                 std::shared_ptr<FusionOpMatcher> second) {
+                return first->BeginNode().Depth() > second->BeginNode().Depth();
+              });
+    return matchers;
   }
  private:
......
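Note: Matchers() now returns a vector sorted by the depth of each matcher's begin node instead of the registration map itself. Presumably this lets FusionOptimize (see the program_optimize.cpp hunk below) try deeper fusion patterns before their sub-patterns, so a conv+add+bn+relu matcher gets a chance before a plain conv+add matcher claims the same nodes. A minimal, self-contained sketch of that ordering; the Matcher struct and op names here are illustrative only, not paddle-mobile types:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Matcher {
  std::string type;
  int depth;  // stands in for BeginNode().Depth()
};

int main() {
  std::vector<Matcher> matchers = {{"fusion_conv_add", 2},
                                   {"fusion_conv_add_bn_relu", 4}};
  // Deeper (more specific) patterns are ordered first, as in Matchers().
  std::sort(matchers.begin(), matchers.end(),
            [](const Matcher &a, const Matcher &b) { return a.depth > b.depth; });
  for (const auto &m : matchers) {
    std::cout << m.type << " tried at depth " << m.depth << std::endl;
  }
  // Prints fusion_conv_add_bn_relu first, then fusion_conv_add.
}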
@@ -44,23 +44,6 @@ bool Node::operator==(const Node &in) {
   return true;
 }

-std::vector<std::shared_ptr<framework::OpDesc>> Node::OpDescs(int size) {
-  std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
-  OpDescs(size - 1, &op_descs);
-  return op_descs;
-}
-
-void Node::OpDescs(int index,
-                   std::vector<std::shared_ptr<framework::OpDesc>> *op_desc) {
-  if (index == 0) {
-    return;
-  }
-  op_desc->push_back(this->op_desc_);
-  for (auto &output : outputs_) {
-    output->OpDescs(index, op_desc);
-  }
-}
-
 std::shared_ptr<Node> Node::To(int size) {
   std::shared_ptr<Node> node = std::make_shared<Node>();
   this->To(size - 1, node);
......
@@ -47,13 +47,10 @@ class Node {
       std::map<std::string, std::vector<std::pair<std::string, std::string>>>
           change,
       std::vector<std::shared_ptr<Node>> *removed_nodes);
-  std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(int size);
   std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }
   std::string Type() { return type_; }

  private:
-  void OpDescs(int size,
-               std::vector<std::shared_ptr<framework::OpDesc>> *op_desc);
   void To(int index, std::shared_ptr<Node>);
   void Folder(
       std::shared_ptr<framework::OpDesc> op_desc,
......
@@ -78,9 +78,8 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FusionOptimize(
   }
   for (auto &registed : FusionOpRegister::Instance()->Matchers()) {
-    std::string fusion_type = registed.first;
-    std::shared_ptr<FusionOpMatcher> matcher = registed.second;
-    // DLOG << " registed node \n " << matcher->BeginNode();
+    std::string fusion_type = registed->Type();
+    std::shared_ptr<FusionOpMatcher> matcher = registed;
     auto match_vector = type_map[matcher->BeginType()];
......
@@ -26,7 +26,7 @@ void BatchNormOp<Dtype, T>::InferShape() const {
   auto x_dims = this->param_.InputX()->dims();
   this->param_.OutputY()->Resize(x_dims);
 }
-template class BatchNormOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
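Note: this is the first of many operator .cpp hunks in this merge where a line of the form `template class XxxOp<CPU, float>;` appears only on the old side, i.e. the explicit template instantiations are apparently being dropped from the operator translation units. How the instantiations are provided afterwards (for example through the operator registration macros) is not visible in this diff and is only an assumption. As a reminder of what such a line does, a minimal, self-contained sketch follows; MyOp and Cpu are hypothetical, not paddle-mobile types:

// my_op.h — declares a class template; the member definition lives in the .cpp.
template <typename Device, typename T>
class MyOp {
 public:
  void InferShape() const;
};

// my_op.cpp — defines the member and explicitly instantiates one combination,
// so other translation units can link against MyOp<Cpu, float> without ever
// seeing the member definition.
struct Cpu {};
template <typename Device, typename T>
void MyOp<Device, T>::InferShape() const {}
template class MyOp<Cpu, float>;  // the kind of line removed in these hunks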
@@ -47,7 +47,7 @@ void BoxCoderOp<Dtype, T>::InferShape() const {
   this->param_.OutputBox()->Resize(framework::make_ddim(
       {input_targetbox_dims[0], input_priorbox_dims[0], 4}));
 }
-template class BoxCoderOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -56,7 +56,6 @@ void ConcatOp<Dtype, T>::InferShape() const {
   this->param_.Out()->Resize(out_dims);
 }
-template class ConcatOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -48,8 +48,6 @@ void ConvOp<Dtype, T>::InferShape() const {
   this->param_.Output()->Resize(ddim);
 }

-template class ConvOp<CPU, float>;
-
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -49,8 +49,6 @@ void DepthwiseConvOp<Dtype, T>::InferShape() const {
   this->param_.Output()->Resize(ddim);
 }

-template class DepthwiseConvOp<CPU, float>;
-
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ void DropoutOp<Dtype, T>::InferShape() const {
   auto input_dims = this->param_.InputX()->dims();
   this->param_.Out()->Resize(input_dims);
 }
-template class DropoutOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ElementwiseAddOp<Dtype, T>::InferShape() const {
   auto x_dim = this->param_.InputX()->dims();
   this->param_.Out()->Resize(x_dim);
 }
-template class ElementwiseAddOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -14,10 +14,7 @@ limitations under the License. */
 #include "feed_op.h"

 namespace paddle_mobile {
-namespace operators {
-
-template class FeedOp<CPU, float>;
-}
+namespace operators {}
 } // namespace paddle_mobile

 namespace ops = paddle_mobile::operators;
......
@@ -14,10 +14,7 @@ limitations under the License. */
 #include "fetch_op.h"

 namespace paddle_mobile {
-namespace operators {
-
-template class FetchOp<CPU, float>;
-}
+namespace operators {}
 } // namespace paddle_mobile

 namespace ops = paddle_mobile::operators;
......
@@ -45,7 +45,6 @@ void FusionConvAddOp<Dtype, T>::InferShape() const {
   this->param_.Output()->Resize(ddim);
 }
-template class FusionConvAddOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -36,8 +36,6 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher {
   void FolderNodes(
       framework::Node *node,
       std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
-    vector<std::shared_ptr<framework::OpDesc>> origin_descs =
-        node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
                  {{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}}}, removed_nodes);
   }
......
@@ -44,7 +44,7 @@ void FusionConvAddBNReluOp<Dtype, T>::InferShape() const {
   framework::DDim ddim = framework::make_ddim(output_shape);
   this->param_.Output()->Resize(ddim);
 }
-template class FusionConvAddBNReluOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -39,8 +39,6 @@ class FusionConvAddBNReluMatcher : public framework::FusionOpMatcher {
   void FolderNodes(
       framework::Node *node,
       std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
-    vector<std::shared_ptr<framework::OpDesc>> origin_descs =
-        node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
                  {{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
                   {G_OP_TYPE_BATCHNORM,
......
@@ -44,7 +44,7 @@ void FusionDWConvBNReluOp<Dtype, T>::InferShape() const {
   framework::DDim ddim = framework::make_ddim(output_shape);
   this->param_.Output()->Resize(ddim);
 }
-template class FusionDWConvBNReluOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -38,8 +38,6 @@ class FusionDWConvBNReluMatcher : public framework::FusionOpMatcher {
   void FolderNodes(
       framework::Node *node,
       std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
-    vector<std::shared_ptr<framework::OpDesc>> origin_descs =
-        node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
                  {{G_OP_TYPE_BATCHNORM,
                    {{"Scale", "Scale"},
......
@@ -50,7 +50,6 @@ void FusionFcOp<Dtype, T>::InferShape() const {
   this->param_.Out()->Resize(ddim);
 }
-template class FusionFcOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -47,8 +47,6 @@ void Im2SequenceOp<Dtype, T>::InferShape() const {
   this->param_.Output()->Resize(ddim);
 }

-template class Im2SequenceOp<CPU, float>;
-
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -104,7 +104,7 @@ void ConvAddBNReluBasic(const FusionConvAddBNReluParam &param) {
       math::matmulWithBn<float>(
           filter_slice, false, col_matrix, false, static_cast<float>(1),
-          &out_slice, static_cast<float>(0), true, &new_scale, &new_bias);
+          &out_slice, static_cast<float>(0), true, &new_scale, &new_bias, g);
     }
   }
 }
......
@@ -101,10 +101,9 @@ void DWConvBNReluBasic(const FusionDWConvBNReluParam &param) {
       // gemm
       Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
       Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
-      std::cout << "***************" << std::endl;
       math::matmulWithBn<float>(
           filter_slice, false, col_matrix, false, static_cast<float>(1),
-          &out_slice, static_cast<float>(0), false, &new_scale, &new_bias);
+          &out_slice, static_cast<float>(0), true, &new_scale, &new_bias, g);
     }
   }
 }
......
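Note: matmulWithBn gains an `int group` argument (see the math_function hunks below) and the fused conv+bn kernels now pass the loop index `g`, so SgemmWithBn reads `new_scale`/`new_bias` starting at the element for the current group. For depthwise convolution, where each group corresponds to one output channel, this appears to select that channel's own scale and bias instead of always starting at element 0; the DWConvBNRelu call site also switches its `relu` flag from false to true and drops a leftover debug `std::cout`. A minimal sketch of the per-group offset idea, using hypothetical names and plain arrays instead of tensors:

#include <cstdio>

// Stand-in for SgemmWithBn's post-processing step: y = scale * y + bias,
// with a single channel per group (the depthwise case).
void ApplyBn(float *out, int n, const float *scale, const float *bias) {
  for (int i = 0; i < n; ++i) {
    out[i] = scale[0] * out[i] + bias[0];
  }
}

int main() {
  const int groups = 3, n_per_group = 4;
  float out[groups * n_per_group] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  float new_scale[groups] = {2.f, 3.f, 4.f};
  float new_bias[groups] = {0.f, 1.f, 2.f};
  for (int g = 0; g < groups; ++g) {
    // Mirrors new_scale->data<float>() + group in the diff: each group's
    // slice uses the coefficients that belong to its own channel.
    ApplyBn(out + g * n_per_group, n_per_group, new_scale + g, new_bias + g);
  }
  for (int i = 0; i < groups * n_per_group; ++i) printf("%g ", out[i]);
  printf("\n");  // 2 2 2 2 4 4 4 4 6 6 6 6
}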
@@ -24,7 +24,7 @@ void LrnOp<Dtype, T>::InferShape() const {
   auto x_dims = this->param_.InputX()->dims();
   this->param_.Out()->Resize(x_dims);
 }
-template class LrnOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -50,7 +50,7 @@ void matmulWithBn<float>(const framework::Tensor &matrix_a, bool trans_a,
                          const framework::Tensor &matrix_b, bool trans_b,
                          float alpha, framework::Tensor *matrix_out, float beta,
                          bool relu, framework::Tensor *new_scale,
-                         framework::Tensor *new_bias) {
+                         framework::Tensor *new_bias, int group) {
   auto dim_a = matrix_a.dims();
   auto dim_b = matrix_b.dims();
   auto dim_out = matrix_out->dims();
@@ -71,7 +71,8 @@ void matmulWithBn<float>(const framework::Tensor &matrix_a, bool trans_a,
   SgemmWithBn(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(),
               N, beta, matrix_out->data<float>(), N, relu,
-              new_scale->data<float>(), new_bias->data<float>());
+              new_scale->data<float>() + group,
+              new_bias->data<float>() + group);
 }
 } // namespace math
......
@@ -31,7 +31,8 @@ template <typename T>
 void matmulWithBn(const framework::Tensor &matrix_a, bool trans_a,
                   const framework::Tensor &matrix_b, bool trans_b, T alpha,
                   framework::Tensor *matrix_out, T beta, bool relu,
-                  framework::Tensor *new_scale, framework::Tensor *new_bias);
+                  framework::Tensor *new_scale, framework::Tensor *new_bias,
+                  int group);
 } // namespace math
 } // namespace operators
 } // namespace paddle_mobile
@@ -50,7 +50,7 @@ void MulOp<Dtype, T>::InferShape() const {
   framework::DDim ddim = framework::make_ddim(output_dims);
   this->param_.Out()->Resize(ddim);
 }
-template class MulOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -34,7 +34,7 @@ void MultiClassNMSOp<Dtype, T>::InferShape() const {
   // pre size, will change in Compute.
   this->param_.Out()->Resize(framework::make_ddim({input_bboxes_dims[1], 6}));
 }
-template class MultiClassNMSOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -371,7 +371,7 @@ class BatchNormParam : OpParam {
     input_variance_ = InputVarianceFrom<LoDTensor>(inputs, scope);
     epsilon_ = GetAttr<float>("epsilon", attrs);
     momentum_ = GetAttr<float>("momentum", attrs);
-    is_test_ = GetAttr<bool>("is_test", attrs);
+    // is_test_ = GetAttr<bool>("is_test", attrs);
   }
   const Tensor *InputX() const { return input_x_; }
......
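Note: the `is_test` read in BatchNormParam is commented out rather than removed. A plausible reason, which is an assumption and not stated in the diff, is that some exported inference models carry no `is_test` attribute on batch_norm, so the unconditional GetAttr would fail. If the attribute map exposed a presence check, a guarded read could keep the flag when it exists; the sketch below uses a hypothetical HasAttr helper that is not a confirmed paddle-mobile API:

// Sketch only; HasAttr is hypothetical, not a confirmed paddle-mobile API.
// It illustrates guarding an optional attribute instead of dropping the read.
if (HasAttr("is_test", attrs)) {
  is_test_ = GetAttr<bool>("is_test", attrs);
} else {
  is_test_ = true;  // assume inference mode when the attribute is absent
}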
@@ -54,7 +54,7 @@ void PoolOp<DeviceType, T>::InferShape() const {
   }
   this->param_.Output()->Resize(framework::make_ddim(output_shape));
 }
-template class PoolOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ void PReluOp<Dtype, T>::InferShape() const {
   auto input_dims = this->param_.InputX()->dims();
   this->param_.Out()->Resize(input_dims);
 }
-template class PReluOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -44,7 +44,7 @@ void PriorBoxOp<Dtype, T>::InferShape() const {
   this->param_.OutputBoxes()->Resize(framework::make_ddim(dim_vec));
   this->param_.OutputVariances()->Resize(framework::make_ddim(dim_vec));
 }
-template class PriorBoxOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ void ReluOp<Dtype, T>::InferShape() const {
   auto input_dims = this->param_.InputX()->dims();
   this->param_.Out()->Resize(input_dims);
 }
-template class ReluOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -27,7 +27,7 @@ void ReshapeOp<Dtype, T>::InferShape() const {
   auto out_dims = ValidateShape(shape, input_x_dims);
   this->param_.Out()->Resize(out_dims);
 }
-template class ReshapeOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ResizeOp<Dtype, T>::InferShape() const {
   auto out_dims = CalOutputShape(this->param_);
   this->param_.Out()->Resize(out_dims);
 }
-template class ResizeOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -24,7 +24,7 @@ void ScaleOp<Dtype, T>::InferShape() const {
   auto input_dims = this->param_.InputX()->dims();
   this->param_.Out()->Resize(input_dims);
 }
-template class ScaleOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ template <typename DeviceType, typename T>
 void SigmoidOp<DeviceType, T>::InferShape() const {
   this->param_.Out()->Resize(this->param_.InputX()->dims());
 }
-template class SigmoidOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -23,7 +23,7 @@ template <typename Dtype, typename T>
 void SliceOp<Dtype, T>::InferShape() const {
   /// todo: add InputShape() detection.
 }
-template class SliceOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -22,7 +22,7 @@ template <typename DeviceType, typename T>
 void SoftmaxOp<DeviceType, T>::InferShape() const {
   this->param_.Out()->Resize(this->param_.InputX()->dims());
 }
-template class SoftmaxOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -47,7 +47,7 @@ void TransposeOp<Dtype, T>::InferShape() const {
   }
   this->param_.Out()->Resize(out_dims);
 }
-template class TransposeOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
......