/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

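// Reads a scalar float out of a tensor. If the tensor lives on the GPU, it
// is first copied synchronously to the CPU so the value can be read on the
// host.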
static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place())) {
    TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

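// Kernel for the scale operator:
//   Out = scale * X + bias    if bias_after_scale is true
//   Out = scale * (X + bias)  otherwise
// X may hold a LoDTensor or a SelectedRows; the transform is applied to the
// underlying value tensor in either case.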
template <typename DeviceContext, typename T>
class ScaleKernel : public framework::OpKernel<T> {
 public:
  virtual void Compute(const framework::ExecutionContext& ctx) const {
    auto* in_var = ctx.InputVar("X");
    auto* in = framework::GetLoDTensorOrSelectedRowsValueFromVar(*in_var);

    auto bias = static_cast<T>(ctx.Attr<float>("bias"));
    auto bias_after_scale = ctx.Attr<bool>("bias_after_scale");

    auto scale = static_cast<T>(ctx.Attr<float>("scale"));
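    // A "ScaleTensor" input, when fed at runtime, takes precedence over the
    // "scale" attribute.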
    if (ctx.HasInput("ScaleTensor")) {
      auto* scale_tensor = ctx.Input<framework::Tensor>("ScaleTensor");
      scale = GetAttrFromTensor(scale_tensor);
    }

    auto* out_var = ctx.OutputVar("Out");
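    // If the input is SelectedRows and the op does not run in-place, copy the
    // rows and height metadata to the output; the value tensor is handled
    // below.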
    if (in_var->IsType<framework::SelectedRows>() && in_var != out_var) {
      auto& in_slr = in_var->Get<framework::SelectedRows>();
      auto* out_slr = out_var->GetMutable<framework::SelectedRows>();
      out_slr->set_rows(in_slr.rows());
      out_slr->set_height(in_slr.height());
    }

    auto* out =
        framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(out_var);
    out->mutable_data<T>(in->place());

    PADDLE_ENFORCE_EQ(in->dims(), out->dims(),
                      "in and out should have the same dim");

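    // Flatten both tensors to 1-D Eigen vectors and evaluate the affine
    // transform on this kernel's device.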
    auto eigen_out = framework::EigenVector<T>::Flatten(*out);
    auto eigen_in = framework::EigenVector<T>::Flatten(*in);
    auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
    if (bias_after_scale) {
      eigen_out.device(dev) = scale * eigen_in + bias;
    } else {
      eigen_out.device(dev) = scale * (eigen_in + bias);
    }
  }
};

}  // namespace operators
}  // namespace paddle
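
// Usage sketch, not part of this header: kernels such as this one are
// registered for concrete device/type pairs in the operator's .cc/.cu files,
// along these lines (illustrative only; the real registration lives in
// scale_op.cc and scale_op.cu):
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       scale, ops::ScaleKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::ScaleKernel<paddle::platform::CPUDeviceContext, double>);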