cos_sim_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

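// Forward kernel: for each row, computes the L2 norms of X and Y and the
// cosine similarity Out = (X . Y) / (XNorm * YNorm).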
template <typename DeviceContext, typename T>
class CosSimKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // get Tensor
    auto* in_x = context.Input<Tensor>("X");
    auto* in_y = context.Input<Tensor>("Y");
    auto* out_z = context.Output<Tensor>("Out");
    auto* out_x_norm = context.Output<Tensor>("XNorm");
    auto* out_y_norm = context.Output<Tensor>("YNorm");
    out_z->mutable_data<T>(context.GetPlace());
    out_x_norm->mutable_data<T>(context.GetPlace());
    out_y_norm->mutable_data<T>(context.GetPlace());

    int rows_x = in_x->dims()[0];
    int rows_y = in_y->dims()[0];

    int cols = framework::product(in_x->dims()) / rows_x;

    if (rows_x == rows_y) {
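      // X and Y have the same number of rows: the cosine similarity is
      // computed independently for each pair of corresponding rows.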
      math::CosSimFunctor<T, true> functor(
          in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(),
          out_y_norm->data<T>(), out_z->data<T>(), cols);
      platform::ForRange<DeviceContext> for_range(
          static_cast<const DeviceContext&>(context.device_context()), rows_x);
      for_range(functor);
    } else {
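      // Row counts differ: Y is expected to hold a single row that is
      // broadcast against every row of X.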
      math::CosSimFunctor<T, false> functor(
          in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(),
          out_y_norm->data<T>(), out_z->data<T>(), cols);
      platform::ForRange<DeviceContext> for_range(
          static_cast<const DeviceContext&>(context.device_context()), rows_x);
      for_range(functor);
    }
  }
};

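// Backward kernel: computes the gradients of X and Y from the forward
// outputs and the incoming gradient of Out.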
template <typename DeviceContext, typename T>
class CosSimGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // get Tensor
    auto* in_x = context.Input<Tensor>("X");
    auto* in_y = context.Input<Tensor>("Y");
    auto* in_z = context.Input<Tensor>("Out");
    auto* in_x_norm = context.Input<Tensor>("XNorm");
    auto* in_y_norm = context.Input<Tensor>("YNorm");
    auto* out_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* out_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
    auto* in_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));

    // compute gradient
    int rows_x = in_x->dims()[0];
    int rows_y = in_y->dims()[0];
    int cols = framework::product(in_x->dims()) / rows_x;

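    // Gradients are produced only for the outputs that are actually requested;
    // out_grad_x and out_grad_y may be null.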
    if (rows_x == rows_y) {
      if (out_grad_x) {
        math::CosSimGradFunctor<T> functor(
            in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
            in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
            out_grad_x->mutable_data<T>(context.GetPlace()), cols);
        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(context.device_context()),
            rows_x);
        for_range(functor);
      }
      if (out_grad_y) {
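        // Same functor as for dX, with the roles of X and Y swapped, so this
        // computes the gradient with respect to Y.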
        math::CosSimGradFunctor<T> functor(
            in_y_norm->data<T>(), in_x_norm->data<T>(), in_y->data<T>(),
            in_x->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
            out_grad_y->mutable_data<T>(context.GetPlace()), cols);
        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(context.device_context()),
            rows_x);
        for_range(functor);
      }
    } else {
      if (out_grad_x) {
        math::CosSimDxFunctor<T> functor(
            in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(),
            in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(),
            out_grad_x->mutable_data<T>(context.GetPlace()), cols);
        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(context.device_context()),
            rows_x);
        for_range(functor);
      }
      if (out_grad_y) {
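        // Broadcast case: the gradient w.r.t. Y accumulates contributions from
        // every row of X, so the buffer is zero-initialized before
        // CosSimDyFunctor accumulates into it.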
        out_grad_y->mutable_data<T>(context.GetPlace());
        math::SetConstant<DeviceContext, T> set_zero;
        auto& dev_ctx = context.template device_context<DeviceContext>();
        set_zero(dev_ctx, out_grad_y, static_cast<T>(0));

        math::CosSimDyFunctor<DeviceContext, T> functor;
        functor(dev_ctx, in_x_norm->data<T>(), in_y_norm->data<T>(),
                in_x->data<T>(), in_y->data<T>(), in_z->data<T>(),
                in_grad_z->data<T>(), static_cast<size_t>(rows_x),
                static_cast<size_t>(cols), out_grad_y->data<T>());
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle