Commit 0417e4e4, authored by chengduoZH

fix framework::LoDTensor => Tensor

Parent commit: 6f61b5df
...@@ -16,17 +16,13 @@ limitations under the License. */ ...@@ -16,17 +16,13 @@ limitations under the License. */
#include "paddle/framework/eigen.h" #include "paddle/framework/eigen.h"
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h" #include "paddle/platform/device_context.h"
#include "paddle/platform/hostdevice.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
namespace math { namespace math {
////////////////////// //////////////////////
#ifdef __NVCC__
#define HL_DEVICE __device__
#else
#define HL_DEVICE
#endif
#define FLT_MAX __FLT_MAX__ #define FLT_MAX __FLT_MAX__
///////////////////// /////////////////////
...@@ -34,11 +30,11 @@ namespace pool { ...@@ -34,11 +30,11 @@ namespace pool {
template <class T> template <class T>
class maxPool { class maxPool {
public: public:
HL_DEVICE inline T initial() { return -(T)(FLT_MAX); } DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
HL_DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; }
HL_DEVICE inline void finalize(T& y, const T& poo_size) {} DEVICE inline void finalize(T& y, const T& poo_size) {}
HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx,
T scale) { T scale) {
dx += dy * (x == y); dx += dy * (x == y);
} }
}; };
...@@ -46,11 +42,11 @@ class maxPool { ...@@ -46,11 +42,11 @@ class maxPool {
template <class T> template <class T>
class avePool { class avePool {
public: public:
HL_DEVICE inline T initial() { return 0; } DEVICE inline T initial() { return static_cast<T>(0); }
HL_DEVICE inline void process(T& y, const T& x) { y += x; } DEVICE inline void process(T& y, const T& x) { y += x; }
HL_DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; }
HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx,
T scale) { T scale) {
dx += (scale * dy); dx += (scale * dy);
} }
}; };
......
...@@ -37,7 +37,7 @@ class PoolOp : public framework::OperatorWithKernel { ...@@ -37,7 +37,7 @@ class PoolOp : public framework::OperatorWithKernel {
// PADDLE_ENFORCE_NOT_NULL(Attr<std::vector<int>>("ksize"), "ksize should // PADDLE_ENFORCE_NOT_NULL(Attr<std::vector<int>>("ksize"), "ksize should
// not be null."); // not be null.");
auto in_X = ctx.Input<Tensor>("X"); auto in_X = ctx.Input<Tensor>("X");
auto out = ctx.Output<framework::LoDTensor>("Out"); auto out = ctx.Output<Tensor>("Out");
int global_pooling = Attr<int>("globalPooling"); int global_pooling = Attr<int>("globalPooling");
std::string pooling_type = Attr<std::string>("poolingType"); std::string pooling_type = Attr<std::string>("poolingType");
std::vector<int> ksize = Attr<std::vector<int>>("ksize"); std::vector<int> ksize = Attr<std::vector<int>>("ksize");
...@@ -78,7 +78,7 @@ class PoolOpGrad : public framework::OperatorWithKernel { ...@@ -78,7 +78,7 @@ class PoolOpGrad : public framework::OperatorWithKernel {
protected: protected:
// Infer the shape of the input gradient for the pooling backward op:
// dX always has the same shape as the forward input X.
void InferShape(const framework::InferShapeContext &ctx) const override {
  auto in = ctx.Input<Tensor>("X");
  // Output as plain Tensor (not framework::LoDTensor) per this change.
  auto d_in = ctx.Output<Tensor>(framework::GradVarName("X"));
  // The gradient output may be absent (e.g. X does not require a
  // gradient), so guard the resize with a null check.
  if (d_in) d_in->Resize(in->dims());
}
}; };
......
...@@ -90,8 +90,7 @@ class PoolGradKernel : public framework::OpKernel { ...@@ -90,8 +90,7 @@ class PoolGradKernel : public framework::OpKernel {
const Tensor* out = context.Input<Tensor>("Out"); const Tensor* out = context.Input<Tensor>("Out");
const Tensor* out_grad = const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out")); context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_X_grad = Tensor* in_X_grad = context.Output<Tensor>(framework::GradVarName("X"));
context.Output<framework::LoDTensor>(framework::GradVarName("X"));
int global_pooling = context.Attr<int>("globalPooling"); int global_pooling = context.Attr<int>("globalPooling");
std::string pooling_type = context.Attr<std::string>("poolingType"); std::string pooling_type = context.Attr<std::string>("poolingType");
......
...@@ -2,8 +2,10 @@ ...@@ -2,8 +2,10 @@
// Portability macros for mixed CUDA/host builds. When compiled by nvcc
// (__CUDACC__ defined), functions annotated with these macros are emitted
// for device, host, or both; in plain host-only builds all three expand
// to nothing so the same code compiles unchanged.
#ifdef __CUDACC__
#define HOSTDEVICE __host__ __device__
#define DEVICE __device__
#define HOST __host__
#else
#define HOSTDEVICE
#define DEVICE
#define HOST
#endif
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment.