// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/elementwise_ops.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>  // std::abs(int)
#include <vector>   // std::vector, used for the padded shape arrays below
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace operators {

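// All three tensors must be wired up before the op can run.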
bool ElementwiseOp::CheckShape() const {
  CHECK_OR_FALSE(param_.X);
  CHECK_OR_FALSE(param_.Y);
  CHECK_OR_FALSE(param_.Out);
  return true;
}

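// Infer the output shape. If X and Y match exactly, X's shape and LoD are
// forwarded as-is; otherwise the two shapes are aligned at `axis` and
// broadcast dimension by dimension.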
bool ElementwiseOp::InferShapeImpl() const {
  auto x_dim = param_.X->dims();
  auto y_dim = param_.Y->dims();
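  // Fast path: identical shapes need no broadcasting.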
  if (x_dim == y_dim) {
    param_.Out->Resize(x_dim);
    auto out_lod = param_.Out->mutable_lod();
    *out_lod = param_.X->lod();
  } else {
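    // Broadcast path: pad the lower-rank shape with 1s to a common rank,
    // then merge the two shapes entry by entry.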
    size_t max_dim =
        (x_dim.size() > y_dim.size() ? x_dim.size() : y_dim.size());
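    // A negative axis (-1) means "align trailing dimensions", i.e. the
    // offset equals the rank difference between X and Y.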
    int axis = param_.axis;
    axis = (axis == -1 ? std::abs(static_cast<int>(x_dim.size()) -
                                  static_cast<int>(y_dim.size()))
                       : axis);
    std::vector<int64_t> x_dims_array(max_dim);
    std::vector<int64_t> y_dims_array(max_dim);
    std::vector<int64_t> out_dims_array(max_dim);

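    // X has the higher rank: embed Y's dims at offset `axis` inside an
    // all-ones shape of length max_dim.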
    if (x_dim.size() > y_dim.size()) {
      for (int i = 0; i < axis; ++i) {
        y_dims_array[i] = 1;
      }
      if (axis + y_dim.size() < max_dim) {
        for (size_t i = axis + y_dim.size(); i < max_dim; ++i) {
          y_dims_array[i] = 1;
        }
      }
      x_dims_array = x_dim.Vectorize();
      for (size_t i = 0; i < y_dim.size(); ++i) {
        y_dims_array[i + axis] = y_dim[i];
      }
    } else {
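      // Y has rank >= X's rank: the mirror case, padding X instead.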
      for (int i = 0; i < axis; ++i) {
        x_dims_array[i] = 1;
      }
      if (axis + x_dim.size() < max_dim) {
        for (size_t i = axis + x_dim.size(); i < max_dim; ++i) {
          x_dims_array[i] = 1;
        }
      }
      y_dims_array = y_dim.Vectorize();
      for (size_t i = 0; i < x_dim.size(); ++i) {
        x_dims_array[i + axis] = x_dim[i];
      }
    }
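    // Merge the padded shapes: a -1 (dimension unknown until runtime)
    // stays -1; otherwise the larger extent wins.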
    for (size_t i = 0; i < max_dim; i++) {
      if (x_dims_array[i] == -1 || y_dims_array[i] == -1) {
        out_dims_array[i] = -1;
      } else {
        out_dims_array[i] = std::max(x_dims_array[i], y_dims_array[i]);
      }
    }
    param_.Out->Resize(DDim(out_dims_array));
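    // As in the same-shape branch, the output inherits X's LoD.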
    auto out_lod = param_.Out->mutable_lod();
    *out_lod = param_.X->lod();
  }

  return true;
}

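// Bind X, Y and Out to the tensors found in the scope, and read the
// `axis` attribute from the op description.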
bool ElementwiseOp::AttachImpl(const cpp::OpDesc& opdesc, lite::Scope* scope) {
  auto X_name = opdesc.Input("X").front();
  auto Y_name = opdesc.Input("Y").front();
  auto Out_name = opdesc.Output("Out").front();

  param_.X = GetVar<lite::Tensor>(scope, X_name);
  param_.Y = GetVar<lite::Tensor>(scope, Y_name);
  param_.Out = GetMutableVar<lite::Tensor>(scope, Out_name);
  param_.axis = opdesc.GetAttr<int>("axis");
  return true;
}

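// Explicit gradient op for the elementwise kernels. It is commented out
// because it is only relevant to training builds (LITE_WITH_TRAIN).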
// #ifdef LITE_WITH_TRAIN
// bool ElementwiseGradExplicitOp::CheckShape() const {
//  CHECK_OR_FALSE(param_.Y);
//  CHECK_OR_FALSE(param_.X_grad);
//  CHECK_OR_FALSE(param_.Out_grad);
//  return true;
//}

// bool ElementwiseGradExplicitOp::InferShapeImpl() const {
//   param_.X_grad->Resize(param_.Out_grad->dims());
//   if (param_.Y_grad) param_.Y_grad->Resize(param_.Y->dims());
//   return true;
// }

// bool ElementwiseGradExplicitOp::AttachImpl(const cpp::OpDesc& opdesc,
//                                            lite::Scope* scope) {
//   CHECK_EQ(opdesc.InputArgumentNames().size(), 2UL);
//   auto Y_name = opdesc.Input("Y").front();
//   auto Out_name = opdesc.Input(framework::GradVarName("Out")).front();
//   auto X_grad = opdesc.Output(framework::GradVarName("X")).front();

//   if (opdesc.Output(framework::GradVarName("Y")).size() > 0) {
//     auto Y_grad = opdesc.Output(framework::GradVarName("Y")).front();
//     param_.Y_grad = GetMutableVar<Tensor>(scope, Y_grad);
//   }
//   param_.Y = GetVar<lite::Tensor>(scope, Y_name);
//   param_.Out_grad = GetVar<lite::Tensor>(scope, Out_name);
//   param_.X_grad = GetMutableVar<lite::Tensor>(scope, X_grad);
//   param_.axis = opdesc.GetAttr<int>("axis");

//   return true;
// }
// #endif

}  // namespace operators
}  // namespace lite
}  // namespace paddle

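// Every elementwise variant shares the same shape inference and attribute
// handling, so a single op class backs all of the registrations below.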
REGISTER_LITE_OP(elementwise_sub, paddle::lite::operators::ElementwiseOp);
REGISTER_LITE_OP(elementwise_add, paddle::lite::operators::ElementwiseOp);

REGISTER_LITE_OP(elementwise_mul, paddle::lite::operators::ElementwiseOp);
REGISTER_LITE_OP(elementwise_max, paddle::lite::operators::ElementwiseOp);
REGISTER_LITE_OP(elementwise_div, paddle::lite::operators::ElementwiseOp);

// #ifdef LITE_WITH_TRAIN
// REGISTER_LITE_OP(elementwise_sub_grad,
//                  paddle::lite::operators::ElementwiseGradExplicitOp);
// REGISTER_LITE_OP(elementwise_add_grad,
//                  paddle::lite::operators::ElementwiseGradExplicitOp);
// #endif