Unverified commit 63203c4a, authored by Jack Zhou, committed by GitHub

Enhance reduce op so it can reduce tensors of arbitrary rank.
Parent 69279207
@@ -22,10 +22,12 @@ limitations under the License. */
#include <cblas.h>
#endif
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/float16.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace paddle {
namespace operators {
@@ -63,6 +65,55 @@ DEFINE_CPU_TRANS(4);
DEFINE_CPU_TRANS(5);
DEFINE_CPU_TRANS(6);
template <typename T>
struct TransposeNormal<platform::CPUDeviceContext, T> {
void operator()(const platform::CPUDeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
const T* in_ptr = in.data<T>();
T* out_ptr = out->data<T>();
auto transpose_helper = [&](int64_t beg, int64_t end) {
for (int64_t out_idx = beg; out_idx < end; ++out_idx) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
// calculate the input index
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride[i];
tmp_idx -= coordinate * out_stride[i];
in_idx += coordinate * in_stride[axis[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
};
double cost_per_iteration =
rank * (Eigen::TensorOpCost::DivCost<int64_t>() +
2 * Eigen::TensorOpCost::MulCost<int64_t>() +
2 * Eigen::TensorOpCost::AddCost<int64_t>());
Eigen::TensorOpCost cost(sizeof(T), sizeof(T), cost_per_iteration);
auto* cpu_device = context.eigen_pool_device();
cpu_device->parallelFor(out->numel(), cost, std::move(transpose_helper));
}
};
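The lambda above is what frees the transpose from rank-specific templates: each output linear index is decomposed into per-axis coordinates with the output strides, and those coordinates are re-accumulated with the input strides of the permuted axes. A minimal standalone sketch of that mapping, with illustrative names (not part of the patch):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Map an output linear index back to the corresponding input linear index
// for a transpose described by `axis`, using row-major strides.
int64_t MapOutToIn(int64_t out_idx, const std::vector<int64_t>& in_stride,
                   const std::vector<int64_t>& out_stride,
                   const std::vector<int>& axis) {
  int64_t in_idx = 0;
  for (size_t i = 0; i < axis.size(); ++i) {
    const int64_t coordinate = out_idx / out_stride[i];  // i-th output coord
    out_idx -= coordinate * out_stride[i];
    in_idx += coordinate * in_stride[axis[i]];  // same coord in input layout
  }
  return in_idx;
}

int main() {
  // Transpose a 2x3 row-major tensor with axis = {1, 0}; the output is 3x2.
  std::vector<int64_t> in_stride{3, 1};   // strides of the 2x3 input
  std::vector<int64_t> out_stride{2, 1};  // strides of the 3x2 output
  std::vector<int> axis{1, 0};
  // Output element (1, 0) has linear index 2 and must come from input
  // element (0, 1), i.e. linear index 1.
  assert(MapOutToIn(2, in_stride, out_stride, axis) == 1);
  return 0;
}
```

The Eigen::TensorOpCost passed to parallelFor only tells the thread-pool device roughly how expensive one output element is (rank divisions, multiplications and additions per index), so Eigen can pick a sensible chunk size when splitting the index range across threads.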
// define transpose normal
#define DEFINE_CPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CPUDeviceContext, TYPE>
DEFINE_CPU_TRANS_NORMAL(platform::float16);
DEFINE_CPU_TRANS_NORMAL(platform::bfloat16);
DEFINE_CPU_TRANS_NORMAL(float);
DEFINE_CPU_TRANS_NORMAL(double);
DEFINE_CPU_TRANS_NORMAL(int);
DEFINE_CPU_TRANS_NORMAL(int64_t);
DEFINE_CPU_TRANS_NORMAL(bool);
DEFINE_CPU_TRANS_NORMAL(int16_t);
DEFINE_CPU_TRANS_NORMAL(uint8_t);
DEFINE_CPU_TRANS_NORMAL(int8_t);
struct TensorSetConstantCPU {
  TensorSetConstantCPU(framework::Tensor* tensor, float value)
      : tensor_(tensor), value_(value) {}
......
@@ -11,8 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h" #include "paddle/fluid/operators/math/math_function_impl.h"
...@@ -23,6 +26,7 @@ namespace operators { ...@@ -23,6 +26,7 @@ namespace operators {
namespace math { namespace math {
using float16 = paddle::platform::float16; using float16 = paddle::platform::float16;
using bfloat16 = paddle::platform::bfloat16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
@@ -35,6 +39,7 @@ template struct SetConstant<platform::CUDADeviceContext, bool>;
  template struct Transpose<platform::CUDADeviceContext, float, RANK>;    \
  template struct Transpose<platform::CUDADeviceContext, double, RANK>;   \
  template struct Transpose<platform::CUDADeviceContext, float16, RANK>;  \
template struct Transpose<platform::CUDADeviceContext, bfloat16, RANK>; \
  template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;   \
  template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>;  \
  template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>;
@@ -46,6 +51,88 @@ DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
#define REINTERPRET(T, DST_PTR, SRC_PTR) \
T* DST_PTR = reinterpret_cast<T*>(SRC_PTR)
template <typename T>
__global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr,
int64_t element,
const int64_t* in_stride_ptr,
const int64_t* out_stride_ptr,
const int64_t* axis_ptr, int rank) {
CUDA_KERNEL_LOOP(out_idx, element) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride_ptr[i];
tmp_idx -= coordinate * out_stride_ptr[i];
in_idx += coordinate * in_stride_ptr[axis_ptr[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
}
template <typename T>
struct TransposeNormal<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
auto* in_ptr = in.data<T>();
auto* out_ptr = out->data<T>();
// copy in_stride, out_stride, axis to gpu device
const platform::CUDAPlace& cuda_place =
BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
platform::CPUPlace cpu_place = platform::CPUPlace();
size_t size = 3 * rank * sizeof(int64_t);
auto cpu_buf_holder = memory::AllocShared(cpu_place, size);
auto cuda_buf_holder = memory::AllocShared(cuda_place, size);
REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr());
REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr());
for (int i = 0; i < rank; ++i) {
cpu_buf[i] = in_stride[i];
cpu_buf[rank + i] = out_stride[i];
cpu_buf[2 * rank + i] = axis[i];
}
memory::Copy(cuda_place, cuda_buf, cpu_place, cpu_buf, size,
context.stream());
REINTERPRET(const int64_t, in_stride_ptr, cuda_buf);
REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank);
REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank);
const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock();
const int MAX_GRID_DIM =
context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
int64_t elements = in.numel();
int block_size = (elements >= MAX_BLOCK_DIM)
? MAX_BLOCK_DIM
: (1 << static_cast<int>(std::log2(elements)));
int grid_size = elements / block_size;
grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
TransposeNormalKernel<T><<<grid_size, block_size, 0, context.stream()>>>(
in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr,
rank);
}
};
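On the GPU path the strides and the axis permutation are packed into one host buffer of 3 * rank int64 values and copied to the device with a single memory::Copy; the kernel then walks output indices with CUDA_KERNEL_LOOP, a grid-stride loop, so the launch configuration only needs to be reasonable rather than cover every element exactly. A plain-C++ sketch of that block/grid heuristic, with placeholder limits (the minimum-of-one guard is added here for the sketch):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Pick a power-of-two block size capped at the device maximum, and a grid
// size capped so leftover elements are handled by the kernel's grid-stride
// loop rather than by extra blocks.
void LaunchConfig(int64_t elements, int max_block_dim, int max_grid_dim,
                  int* block_size, int* grid_size) {
  *block_size = (elements >= max_block_dim)
                    ? max_block_dim
                    : (1 << static_cast<int>(std::log2(elements)));
  *grid_size = static_cast<int>(elements / *block_size);
  if (*grid_size > max_grid_dim) *grid_size = max_grid_dim;
  if (*grid_size < 1) *grid_size = 1;
}

int main() {
  int block = 0, grid = 0;
  LaunchConfig(/*elements=*/10000, /*max_block_dim=*/1024,
               /*max_grid_dim=*/64, &block, &grid);
  std::printf("block=%d grid=%d\n", block, grid);  // prints block=1024 grid=9
  return 0;
}
```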
// define transpose normal
#define DEFINE_GPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CUDADeviceContext, TYPE>
DEFINE_GPU_TRANS_NORMAL(float16);
DEFINE_GPU_TRANS_NORMAL(bfloat16);
DEFINE_GPU_TRANS_NORMAL(float);
DEFINE_GPU_TRANS_NORMAL(double);
DEFINE_GPU_TRANS_NORMAL(int);
DEFINE_GPU_TRANS_NORMAL(int64_t);
DEFINE_GPU_TRANS_NORMAL(bool);
DEFINE_GPU_TRANS_NORMAL(int16_t);
DEFINE_GPU_TRANS_NORMAL(uint8_t);
DEFINE_GPU_TRANS_NORMAL(int8_t);
struct TensorSetConstantGPU {
  TensorSetConstantGPU(const platform::DeviceContext& context,
                       framework::Tensor* tensor, float value)
......
@@ -26,6 +26,14 @@ limitations under the License. */
namespace paddle {
namespace operators {
namespace math {
template <typename DeviceContext, typename T>
struct TransposeNormal {
// for dims >= 7 situation
void operator()(const DeviceContext& context, const framework::Tensor& in,
framework::Tensor* out, const std::vector<int>& axis);
};
template <typename DeviceContext, typename T, int Rank>
struct Transpose {
  void operator()(const DeviceContext& context, const framework::Tensor& in,
......
@@ -18,9 +18,10 @@ limitations under the License. */
#include <set>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/cast_op.h" #include "paddle/fluid/operators/cast_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h" #include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"
namespace paddle { namespace paddle {
...@@ -34,6 +35,110 @@ namespace operators { ...@@ -34,6 +35,110 @@ namespace operators {
} }
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
using DDim = framework::DDim;
inline void GetShuffledDim(const DDim& src_dims, DDim* dst_dims,
const std::vector<int>& reduced_dims,
std::vector<int>* perm_axis) {
// check if it's a reduced dim
std::vector<bool> src_dims_check(src_dims.size(), false);
size_t src_size = src_dims.size();
size_t reduce_size = reduced_dims.size();
for (size_t i = 0; i < reduce_size; ++i) {
dst_dims->at(src_size - reduce_size + i) = src_dims[reduced_dims[i]];
(*perm_axis)[src_size - reduce_size + i] = reduced_dims[i];
src_dims_check[reduced_dims[i]] = true;
}
size_t offset = 0;
for (size_t i = 0; i < src_dims_check.size(); ++i) {
bool is_reduced = src_dims_check[i];
if (!is_reduced) {
(*perm_axis)[offset] = i;
dst_dims->at(offset++) = src_dims[i];
}
}
}
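GetShuffledDim builds the permutation that HandleLargeDim relies on: non-reduced axes keep their original relative order at the front, and the reduced axes are appended at the tail. A minimal standalone illustration, reusing the 8-D shape and dim=(4, 2, 3) from Test8DReduce0 in the Python tests below (a sketch, not the Paddle helper itself):

```cpp
#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> src_dims = {2, 5, 3, 2, 2, 3, 4, 2};  // rank 8
  const std::vector<int> reduced = {4, 2, 3};  // axes being reduced
  const size_t n = src_dims.size(), r = reduced.size();
  std::vector<int> perm(n), dst(n);
  std::vector<bool> is_reduced(n, false);
  // Reduced axes go to the tail, in the order they were requested.
  for (size_t i = 0; i < r; ++i) {
    perm[n - r + i] = reduced[i];
    dst[n - r + i] = src_dims[reduced[i]];
    is_reduced[reduced[i]] = true;
  }
  // Remaining axes keep their relative order at the front.
  size_t offset = 0;
  for (size_t i = 0; i < n; ++i) {
    if (!is_reduced[i]) {
      perm[offset] = static_cast<int>(i);
      dst[offset++] = src_dims[i];
    }
  }
  // perm = {0, 1, 5, 6, 7, 4, 2, 3}; dst = {2, 5, 3, 4, 2, 2, 3, 2}
  for (size_t i = 0; i < n; ++i) std::printf("%d->%d ", perm[i], dst[i]);
  std::printf("\n");
  return 0;
}
```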
template <typename DeviceContext, typename OutT>
void GetShuffledInput(const framework::ExecutionContext& context,
const Tensor* input, Tensor* shuffled_input,
const std::vector<int>& dims) {
DDim shuffled_dims(input->dims());
std::vector<int> perm_axis(input->dims().size());
GetShuffledDim(input->dims(), &shuffled_dims, dims, &perm_axis);
shuffled_input->Resize(shuffled_dims);
shuffled_input->mutable_data<OutT>(context.GetPlace());
math::TransposeNormal<DeviceContext, OutT> trans;
trans(context.template device_context<DeviceContext>(), *input,
shuffled_input, perm_axis);
}
inline void GetOriginDimFromShuffled(const DDim& src_dim,
const std::vector<int>& dims,
std::vector<int>* origin_dim) {
DDim shuffled_dims(src_dim);
size_t n = src_dim.size();
std::vector<int> perm_axis(n);
GetShuffledDim(src_dim, &shuffled_dims, dims, &perm_axis);
for (size_t i = 0; i < n; ++i) {
(*origin_dim)[perm_axis[i]] = i;
}
}
template <typename DeviceContext, typename OutT, typename Functor>
void HandleLargeDim(const framework::ExecutionContext& context,
const Tensor* input, Tensor* output,
const std::vector<int>& dims, bool keep_dim) {
// shuffle the reduced dim to the end
Tensor shuffled_input;
GetShuffledInput<DeviceContext, OutT>(context, input, &shuffled_input, dims);
// transpose to 2D tensor whose shape is {unreduced, reduced}.
const int64_t unreduced = output->numel();
const int64_t reduced = shuffled_input.numel() / unreduced;
shuffled_input.Resize({unreduced, reduced});
DDim output_dim = output->dims();
output->Resize({unreduced});
ReduceFunctor<DeviceContext, OutT, 2, 1, Functor>(
context.template device_context<DeviceContext>(), shuffled_input, output,
{1}, keep_dim);
output->Resize(output_dim);
}
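Concretely, for Test8DReduce0 below: an input of shape (2, 5, 3, 2, 2, 3, 4, 2) reduced over dim=(4, 2, 3) is transposed to (2, 5, 3, 4, 2, 2, 3, 2), viewed as a {240, 12} matrix (240 = 2*5*3*4*2 kept elements, 12 = 2*3*2 reduced elements, 240*12 = 2880 = numel), reduced along axis 1 by the existing rank-2 ReduceFunctor, and finally resized back to the real output shape.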
template <typename DeviceContext, typename T, typename Functor>
void HandleLargeDimGrad(const framework::ExecutionContext& context,
const framework::Tensor* x,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
const std::vector<int>& dims) {
const int64_t unreduced = out->numel();
const int64_t reduced = x->numel() / unreduced;
DDim out_dim(out->dims());
DDim x_dim(x->dims());
// transpose and reshape X
Tensor shuffled_x;
GetShuffledInput<DeviceContext, T>(context, x, &shuffled_x, dims);
DDim shuffled_dim = shuffled_x.dims();
shuffled_x.Resize({unreduced, reduced});
// reshape dX {unreduced, reduced}
dx->Resize({unreduced, reduced});
ReduceGradFunctor<DeviceContext, T, 2, Functor>(
context.template device_context<DeviceContext>(), shuffled_x, *out, *dout,
dx, {1});
// transpose dX
std::vector<int> origin_axis(x_dim.size());
GetOriginDimFromShuffled(x_dim, dims, &origin_axis);
Tensor dx_tmp;
framework::TensorCopy(*dx, context.GetPlace(), &dx_tmp);
dx_tmp.Resize(shuffled_dim);
dx->Resize(x_dim);
math::TransposeNormal<DeviceContext, T> trans;
trans(context.template device_context<DeviceContext>(), dx_tmp, dx,
origin_axis);
}
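The gradient path mirrors this: dX is computed in the shuffled {unreduced, reduced} layout and then transposed back with the inverse permutation that GetOriginDimFromShuffled derives via origin_dim[perm_axis[i]] = i. Continuing the example above, the forward permutation (0, 1, 5, 6, 7, 4, 2, 3) inverts to (0, 1, 6, 7, 5, 2, 3, 4), restoring the original 8-D axis order.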
template <typename DeviceContext, typename T, typename Functor>
struct ReduceKernelFunctor {
@@ -69,6 +174,10 @@ struct ReduceKernelFunctor {
    } else {
      int ndim = input->dims().size();
      int rdim = dims.size();
if (ndim > 6) {
HandleLargeDim<DeviceContext, OutT, Functor>(context, input, output,
dims, keep_dim);
} else {
        HANDLE_DIM(6, 5);
        HANDLE_DIM(6, 4);
        HANDLE_DIM(6, 3);
@@ -87,6 +196,7 @@ struct ReduceKernelFunctor {
        HANDLE_DIM(1, 1);
      }
    }
}
};
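For context, HANDLE_DIM(NDIM, RDIM) expands elsewhere in this header to a ReduceFunctor call templated on the concrete input rank and number of reduced axes, guarded by ndim == NDIM && rdim == RDIM, so the macro table necessarily stops at rank 6; the new ndim > 6 branch sends every higher-rank case through HandleLargeDim, which needs no rank-specific instantiation.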
template <typename DeviceContext, typename T, typename Functor>
class ReduceKernel : public framework::OpKernel<T> {
@@ -137,7 +247,6 @@ class ReduceKernel : public framework::OpKernel<T> {
    }
  }
};
template <typename DeviceContext, typename OutT, typename Functor>
class BoolReduceKernel : public framework::OpKernel<OutT> {
 public:
@@ -175,15 +284,19 @@ class BoolReduceKernel : public framework::OpKernel<OutT> {
    int ndim = input->dims().size();
    int rdim = dims.size();
    // comments for accelerating compiling temporarily.
      if (ndim > 6) {
        HandleLargeDim<DeviceContext, OutT, Functor>(context, input, output,
                                                     dims, keep_dim);
      } else {
        HANDLE_DIM(6, 5);
        HANDLE_DIM(6, 4);
        HANDLE_DIM(6, 3);
        HANDLE_DIM(6, 2);
        HANDLE_DIM(6, 1);
        HANDLE_DIM(5, 4);
        HANDLE_DIM(5, 3);
        HANDLE_DIM(5, 2);
        HANDLE_DIM(5, 1);
        HANDLE_DIM(4, 3);
        HANDLE_DIM(4, 2);
        HANDLE_DIM(4, 1);
@@ -193,6 +306,7 @@ class BoolReduceKernel : public framework::OpKernel<OutT> {
        HANDLE_DIM(1, 1);
      }
    }
}
};
template <typename DeviceContext, typename T, typename Functor,
          bool kNoNeedBufferX = false, bool kNoNeedBufferY = false>
@@ -279,6 +393,10 @@ class ReduceGradKernel : public framework::OpKernel<T> {
            context.template device_context<DeviceContext>(), *input0,
            *input1, *input2, output, dims);
        break;
default:
HandleLargeDimGrad<DeviceContext, T, Functor>(context, input0, input1,
input2, output, dims);
break;
      }
    }
  }
@@ -313,12 +431,6 @@ class ReduceOp : public framework::OperatorWithKernel {
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ReduceOp");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
PADDLE_ENFORCE_LE(x_rank, 6,
platform::errors::InvalidArgument(
"The input tensor X's dimensions of ReduceOp "
"should be less equal than 6. But received X's "
"dimensions = %d, X's shape = [%s].",
x_rank, x_dims));
auto dims = ctx->Attrs().Get<std::vector<int>>("dim"); auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
PADDLE_ENFORCE_GT(dims.size(), 0, PADDLE_ENFORCE_GT(dims.size(), 0,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
...@@ -402,11 +514,6 @@ class ReduceGradOp : public framework::OperatorWithKernel { ...@@ -402,11 +514,6 @@ class ReduceGradOp : public framework::OperatorWithKernel {
"Out@GRAD", "ReduceOp"); "Out@GRAD", "ReduceOp");
auto x_dims = ctx->GetInputDim("X"); auto x_dims = ctx->GetInputDim("X");
auto x_rank = x_dims.size(); auto x_rank = x_dims.size();
PADDLE_ENFORCE_LE(x_rank, 6,
platform::errors::InvalidArgument(
"Tensors with rank at most 6 are supported by "
"ReduceOp. Received tensor with rank %d.",
x_rank));
auto dims = ctx->Attrs().Get<std::vector<int>>("dim"); auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
for (size_t i = 0; i < dims.size(); ++i) { for (size_t i = 0; i < dims.size(); ++i) {
PADDLE_ENFORCE_LT(dims[i], x_rank, PADDLE_ENFORCE_LT(dims[i], x_rank,
......
@@ -53,10 +53,9 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
      trans6(dev_ctx, in, out, axis);
      break;
    default:
      // for dim >= 7 situation
      math::TransposeNormal<DeviceContext, T> trans_normal;
      trans_normal(dev_ctx, in, out, axis);
  }
}
......
@@ -12,6 +12,7 @@ limitations under the License. */
#include "paddle/fluid/platform/device_context.h"
#include <set>
#include <string>
#include <thread> //NOLINT
#include <unordered_set>
#include <vector>
@@ -23,6 +24,7 @@ limitations under the License. */
#endif
#include "glog/logging.h"
#include "unsupported/Eigen/CXX11/ThreadPool"
namespace paddle {
namespace memory {
@@ -131,16 +133,31 @@ DeviceContextPool::DeviceContextPool(
CPUDeviceContext::CPUDeviceContext() {
  eigen_device_.reset(new Eigen::DefaultDevice());
InitPoolDevice();
}
CPUDeviceContext::CPUDeviceContext(CPUPlace place) : place_(place) {
  eigen_device_.reset(new Eigen::DefaultDevice());
InitPoolDevice();
}
void CPUDeviceContext::InitPoolDevice() {
using EigenEnv = Eigen::StlThreadEnvironment;
using EigenThreadPool = Eigen::ThreadPoolTempl<EigenEnv>;
int num_threads = std::thread::hardware_concurrency();
eigen_threadpool_.reset(new EigenThreadPool(num_threads));
eigen_pool_device_.reset(
new Eigen::ThreadPoolDevice(eigen_threadpool_.get(), num_threads));
}
Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const {
  return eigen_device_.get();
}
Eigen::ThreadPoolDevice* CPUDeviceContext::eigen_pool_device() const {
return eigen_pool_device_.get();
}
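The new pool device gives the CPU context an Eigen::ThreadPoolDevice backed by hardware_concurrency() worker threads; this is what TransposeNormal's parallelFor call runs on. A minimal self-contained sketch of that Eigen API, with an illustrative thread count and cost model (not Paddle code):

```cpp
#define EIGEN_USE_THREADS
#include <atomic>
#include <cstdio>
#include "unsupported/Eigen/CXX11/Tensor"

int main() {
  Eigen::ThreadPool pool(4);                 // 4 worker threads
  Eigen::ThreadPoolDevice device(&pool, 4);  // device wrapping the pool
  std::atomic<long long> sum{0};
  // Per-element cost: bytes loaded, bytes stored, compute cycles; Eigen uses
  // this to choose how finely to split the [0, n) range across threads.
  Eigen::TensorOpCost cost(sizeof(int), sizeof(int), /*compute_cycles=*/4.0);
  device.parallelFor(1000, cost, [&](Eigen::Index begin, Eigen::Index end) {
    long long local = 0;
    for (Eigen::Index i = begin; i < end; ++i) local += i;
    sum += local;
  });
  std::printf("%lld\n", static_cast<long long>(sum));  // prints 499500
  return 0;
}
```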
Place CPUDeviceContext::GetPlace() const { return place_; }
#ifdef PADDLE_WITH_XPU
......
@@ -41,6 +41,7 @@ limitations under the License. */
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/stream/cuda_stream.h"
#endif
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor" #include "unsupported/Eigen/CXX11/Tensor"
#ifdef PADDLE_WITH_XPU
@@ -65,11 +66,17 @@ class CPUDeviceContext : public DeviceContext {
  Eigen::DefaultDevice* eigen_device() const;
Eigen::ThreadPoolDevice* eigen_pool_device() const;
  Place GetPlace() const override;
inline void InitPoolDevice();
 private:
  CPUPlace place_;
  std::unique_ptr<Eigen::DefaultDevice> eigen_device_;
std::unique_ptr<Eigen::ThreadPoolDevice> eigen_pool_device_;
std::unique_ptr<Eigen::ThreadPool> eigen_threadpool_;
};
template <typename Place>
......
@@ -67,6 +67,22 @@ class TestSumOp6D(OpTest):
        self.check_grad(['X'], 'Out')
class TestSumOp8D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
}
self.attrs = {'dim': (0, 3)}
self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
@@ -103,6 +119,40 @@ class TestMinOp(OpTest):
        self.check_output()
class TestMin6DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
}
self.attrs = {'dim': [2, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMin8DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
@@ -116,6 +166,42 @@ class TestProdOp(OpTest):
        self.check_grad(['X'], 'Out')
class TestProd6DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.inputs = {
'X': np.random.random((5, 6, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestProd8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
@@ -127,12 +213,40 @@ class TestAllOp(OpTest):
        self.check_output()
class TestAll8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, )}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAll8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
    def test_check_output(self):
        self.check_output()
@@ -152,6 +266,23 @@ class TestAllOpWithKeepDim(OpTest):
        self.check_output()
class TestAll8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (5, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
}
def test_check_output(self):
self.check_output()
class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
@@ -175,6 +306,20 @@ class TestAnyOp(OpTest):
        self.check_output()
class TestAny8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
@@ -186,14 +331,45 @@ class TestAnyOpWithDim(OpTest):
        self.check_output()
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 6)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }
def test_check_output(self): def test_check_output(self):
@@ -283,6 +459,18 @@ class Test3DReduce3(Test1DReduce):
        }
class Test8DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': (4, 2, 3)}
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
@@ -294,6 +482,19 @@ class TestKeepDimReduce(Test1DReduce):
        }
class TestKeepDim8DReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
class TestReduceAll(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
@@ -302,6 +503,16 @@ class TestReduceAll(Test1DReduce):
        self.outputs = {'Out': self.inputs['X'].sum()}
class TestReduceAll(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'reduce_all': True, 'dim': (3, 4, 5)}
self.outputs = {'Out': self.inputs['X'].sum(axis=self.attrs['dim'])}
@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
......
@@ -99,6 +99,18 @@ class TestCase7(TestTransposeOp):
        self.axis = (0, 1, 3, 2)
class TestCase8(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (0, 1, 3, 2, 4, 5, 6, 7)
class TestCase9(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
class TestTransposeOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
......