提交 0417e4e4 编写于 作者: C chengduoZH

fix framework::LoDTensor => Tensor

上级 6f61b5df
......@@ -16,17 +16,13 @@ limitations under the License. */
#include <cfloat>  // FLT_MAX — needed once the local FLT_MAX macro is gone

#include "paddle/framework/eigen.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
namespace math {
//////////////////////
// Legacy device-qualifier macro: expands to __device__ when compiled by
// NVCC, and to nothing for a host-only build. Superseded by the DEVICE
// macro from paddle/platform/hostdevice.h (included above).
#ifdef __NVCC__
#define HL_DEVICE __device__
#else
#define HL_DEVICE
#endif
// NOTE(review): this redefines the standard FLT_MAX via the GCC/Clang
// builtin __FLT_MAX__ and shadows <cfloat>; prefer including <cfloat>.
#define FLT_MAX __FLT_MAX__
/////////////////////
......@@ -34,11 +30,11 @@ namespace pool {
// Fallback so the functor is usable even if hostdevice.h was not included
// first; hostdevice.h always defines DEVICE, so this is normally a no-op.
#ifndef DEVICE
#define DEVICE
#endif
// Max-pooling reduction functor.
// Usage: y = initial(); process(y, x) for every element in the window;
// finalize(y, pool_size) is a no-op for max pooling.
// (This resolves a diff artifact: the pre-change HL_DEVICE methods and the
// post-change DEVICE methods were both present, redefining every member.)
template <class T>
class maxPool {
 public:
  // Identity element of the max reduction: most negative float value.
  DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
  // Fold one input element x into the running maximum y.
  DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; }
  // Max pooling needs no normalization, so finalize is intentionally empty.
  DEVICE inline void finalize(T& y, const T& pool_size) {}
  // Backward pass: route the upstream gradient dy into dx only where the
  // input equaled the pooled maximum (x == y); `scale` is unused here.
  DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx,
                                 T scale) {
    dx += dy * (x == y);
  }
};
......@@ -46,11 +42,11 @@ class maxPool {
// Fallback so the functor is usable even if hostdevice.h was not included
// first; hostdevice.h always defines DEVICE, so this is normally a no-op.
#ifndef DEVICE
#define DEVICE
#endif
// Average-pooling reduction functor.
// Usage: y = initial(); process(y, x) for every element in the window;
// finalize(y, pool_size) divides the sum by the window size.
// (This resolves a diff artifact: the pre-change HL_DEVICE methods and the
// post-change DEVICE methods were both present, redefining every member.
// Also fixes the `poo_size` parameter typo — positional C++ callers are
// unaffected.)
template <class T>
class avePool {
 public:
  // Identity element of the sum reduction.
  DEVICE inline T initial() { return static_cast<T>(0); }
  // Accumulate one input element into the running sum y.
  DEVICE inline void process(T& y, const T& x) { y += x; }
  // Convert the accumulated sum into a mean over the pooling window.
  DEVICE inline void finalize(T& y, const T& pool_size) { y /= pool_size; }
  // Backward pass: the gradient is spread uniformly over the window, so
  // every input receives dy scaled by 1/window_size (passed as `scale`);
  // x and y are unused for average pooling.
  DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx,
                                 T scale) {
    dx += (scale * dy);
  }
};
......
......@@ -37,7 +37,7 @@ class PoolOp : public framework::OperatorWithKernel {
// PADDLE_ENFORCE_NOT_NULL(Attr<std::vector<int>>("ksize"), "ksize should
// not be null.");
auto in_X = ctx.Input<Tensor>("X");
auto out = ctx.Output<framework::LoDTensor>("Out");
auto out = ctx.Output<Tensor>("Out");
int global_pooling = Attr<int>("globalPooling");
std::string pooling_type = Attr<std::string>("poolingType");
std::vector<int> ksize = Attr<std::vector<int>>("ksize");
......@@ -78,7 +78,7 @@ class PoolOpGrad : public framework::OperatorWithKernel {
protected:
// Infer the shape of the input gradient for the pooling backward op:
// dX always has the same dims as the forward input X.
// (Resolves a diff artifact: both the old LoDTensor and the new Tensor
// declarations of d_in were present, redefining the local variable; the
// post-change Tensor version is kept, matching the commit's intent.)
void InferShape(const framework::InferShapeContext &ctx) const override {
  auto in = ctx.Input<Tensor>("X");
  // GradVarName("X") yields the framework-generated name of X's gradient.
  auto d_in = ctx.Output<Tensor>(framework::GradVarName("X"));
  // The gradient output may be absent when not required; guard the Resize.
  if (d_in) d_in->Resize(in->dims());
}
};
......
......@@ -90,8 +90,7 @@ class PoolGradKernel : public framework::OpKernel {
const Tensor* out = context.Input<Tensor>("Out");
const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_X_grad =
context.Output<framework::LoDTensor>(framework::GradVarName("X"));
Tensor* in_X_grad = context.Output<Tensor>(framework::GradVarName("X"));
int global_pooling = context.Attr<int>("globalPooling");
std::string pooling_type = context.Attr<std::string>("poolingType");
......
......@@ -2,8 +2,10 @@
// Qualifier macros for code shared between host C++ and CUDA device code.
#ifdef __CUDACC__
// Compiled by the CUDA compiler: mark functions callable from host,
// device, or both.
#define HOSTDEVICE __host__ __device__
#define DEVICE __device__
#define HOST __host__
#else
// Plain C++ compiler: the qualifiers expand to nothing.
#define HOSTDEVICE
#define DEVICE
#define HOST
#endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册