提交 6f932482 编写于 作者: T Tomasz Patejko

MKL elementwise_add: BLAS version compiles with integral types

上级 e43c8f33
......@@ -18,10 +18,10 @@ namespace ops = paddle::operators;
REGISTER_ELEMWISE_OP(elementwise_add, "Add", "Out = X + Y");
REGISTER_OP_CPU_KERNEL(
elementwise_add,
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, float>);
// ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, double>);
// ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int>,
// ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int64_t>);
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, float>,
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_CPU_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<paddle::platform::CPUDeviceContext, float>,
......
......@@ -26,6 +26,34 @@ struct AddFunctor {
inline HOSTDEVICE T operator()(T a, T b) const { return a + b; }
};
// Generic element-wise add: goes through ElementwiseComputeEx, which handles
// tensors of different shapes by broadcasting y onto x along the "axis"
// attribute. Used as the fallback whenever the BLAS fast path does not apply.
template <typename DeviceContext, typename T>
void default_elementwise_add(const framework::ExecutionContext& ctx,
                             const framework::Tensor* x,
                             const framework::Tensor* y, framework::Tensor* z) {
  const int broadcast_axis = ctx.Attr<int>("axis");
  ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(
      ctx, x, y, broadcast_axis, AddFunctor<T>(), z);
}
// Fast path selected via SFINAE for floating-point element types: a single
// vectorized MKL/BLAS VADD over the flattened buffers. Only valid when x and
// y have identical shapes (no broadcasting) and z's memory has already been
// allocated by the caller (the kernel calls z->mutable_data<T>() first).
//
// Fix: the original built three EigenVector TensorMaps only to call .data()
// on them, which yields the same underlying pointers as Tensor::data<T>() —
// the redundant Eigen wrappers are dropped and the tensor buffers are passed
// to VADD directly.
template <typename DeviceContext, typename T>
typename std::enable_if<std::is_floating_point<T>::value>::type elementwise_add(
    const framework::ExecutionContext& ctx, const framework::Tensor* x,
    const framework::Tensor* y, framework::Tensor* z) {
  auto blas = math::GetBlas<DeviceContext, T>(ctx);
  blas.VADD(x->numel(), x->data<T>(), y->data<T>(), z->data<T>());
}
// Overload chosen via SFINAE when T is an integral type (int, int64_t in the
// registered kernels). The MKL/BLAS VADD fast path above is floating-point
// only, so integral element types delegate to the generic broadcast-aware
// implementation.
template <typename DeviceContext, typename T>
typename std::enable_if<std::is_integral<T>::value>::type elementwise_add(
    const framework::ExecutionContext& ctx, const framework::Tensor* x,
    const framework::Tensor* y, framework::Tensor* z) {
  default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
}
template <typename DeviceContext, typename T>
class ElementwiseAddKernel : public framework::OpKernel<T> {
public:
......@@ -36,19 +64,12 @@ class ElementwiseAddKernel : public framework::OpKernel<T> {
const auto y = ctx.Input<Tensor>("Y");
auto z = ctx.Output<Tensor>("Out");
z->mutable_data<T>(ctx.GetPlace());
int axis = ctx.Attr<int>("axis");
auto dims_equal = x->dims() == y->dims();
if (platform::is_cpu_place(ctx.GetPlace()) && dims_equal) {
auto eigen_x = framework::EigenVector<T>::Flatten(*x);
auto eigen_y = framework::EigenVector<T>::Flatten(*y);
auto eigen_z = framework::EigenVector<T>::Flatten(*z);
auto blas = math::GetBlas<DeviceContext, T>(ctx);
blas.VADD(x->numel(), eigen_x.data(), eigen_y.data(), eigen_z.data());
elementwise_add<DeviceContext, T>(ctx, x, y, z);
} else {
ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
AddFunctor<T>(), z);
default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
}
}
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请注册