Unverified · Commit bbe441fc authored by zhouweiwei2014, committed by GitHub

【Phi】Migrate poisson op into phi (#39814)

* Migrate poisson op into phi

* fix CI

* fix comment
Parent 23bbd912
paddle/fluid/operators/poisson_op.cc:
@@ -13,8 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include <string>
 
-#include "paddle/fluid/operators/poisson_op.h"
+#include "paddle/fluid/framework/infershape_utils.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/unary.h"
 
 namespace paddle {
 namespace operators {
@@ -23,14 +25,6 @@ class PoissonOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "PoissonOp");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "PoissonOp");
-    auto dim = ctx->GetInputDim("X");
-    ctx->SetOutputDim("Out", dim);
-  }
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
@@ -61,29 +55,6 @@ class PoissonOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
   }
 };
 
-template <typename T>
-class PoissonKernel<platform::CPUDeviceContext, T>
-    : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext &ctx) const override {
-    const auto *x = ctx.Input<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::Tensor>("Out");
-
-    const T *x_data = x->data<T>();
-    T *out_data = out->mutable_data<T>(ctx.GetPlace());
-    int64_t size = x->numel();
-
-    auto gen = framework::DefaultCPUGenerator();
-    auto engine = gen->GetCPUEngine();
-
-    for (int64_t i = 0; i < size; ++i) {
-      std::poisson_distribution<> dist(x_data[i]);
-      out_data[i] = static_cast<T>(dist(*engine));
-    }
-  }
-};
-
 class PoissonGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -116,17 +87,13 @@ class PoissonGradOpMaker : public framework::SingleGradOpMaker<T> {
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
+DELCARE_INFER_SHAPE_FUNCTOR(poisson, PoissonInferShapeFunctor,
+                            PT_INFER_META(phi::UnchangedInferMeta));
+
 REGISTER_OPERATOR(poisson, ops::PoissonOp, ops::PoissonOpMaker,
                   ops::PoissonOpInferVarType,
                   ops::PoissonGradOpMaker<paddle::framework::OpDesc>,
-                  ops::PoissonGradOpMaker<paddle::imperative::OpBase>);
+                  ops::PoissonGradOpMaker<paddle::imperative::OpBase>,
+                  PoissonInferShapeFunctor);
 
 REGISTER_OPERATOR(poisson_grad, ops::PoissonGradOp);
-
-REGISTER_OP_CPU_KERNEL(poisson,
-                       ops::PoissonKernel<plat::CPUDeviceContext, float>,
-                       ops::PoissonKernel<plat::CPUDeviceContext, double>);
-
-REGISTER_OP_CPU_KERNEL(poisson_grad,
-                       ops::PoissonGradKernel<plat::CPUDeviceContext, float>,
-                       ops::PoissonGradKernel<plat::CPUDeviceContext, double>);
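
The hand-written InferShape deleted above is replaced by the PoissonInferShapeFunctor registered here, which routes shape inference through phi::UnchangedInferMeta (DELCARE_INFER_SHAPE_FUNCTOR and PT_INFER_META are the macros' actual names in the tree at this point). A rough sketch of what that meta function does — the output simply inherits the input's metadata, which is exactly what SetOutputDim("Out", dim) achieved:

// Rough sketch of phi::UnchangedInferMeta (not verbatim source):
// the output shares the input's dims, dtype, and layout.
void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}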

paddle/phi/infermeta/unary.h:
@@ -103,4 +103,5 @@ void UnfoldInferMeta(const MetaTensor& x,
                      const std::vector<int>& dilations,
                      MetaTensor* out,
                      MetaConfig config = MetaConfig());
 
 }  // namespace phi

paddle/phi/kernels/cpu/poisson_grad_kernel.cc (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/poisson_grad_kernel_impl.h"
PD_REGISTER_KERNEL(
poisson_grad, CPU, ALL_LAYOUT, phi::PoissonGradKernel, float, double) {}

paddle/phi/kernels/cpu/poisson_kernel.cc (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <random>
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/poisson_kernel.h"
namespace phi {
template <typename T, typename Context>
void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
int64_t size = x.numel();
auto gen = ctx.GetGenerator();
auto engine = gen->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
std::poisson_distribution<> dist(x_data[i]);
out_data[i] = static_cast<T>(dist(*engine));
}
}
} // namespace phi
PD_REGISTER_KERNEL(
poisson, CPU, ALL_LAYOUT, phi::PoissonKernel, float, double) {}
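
The CPU kernel constructs a fresh std::poisson_distribution per element so that each entry of x serves as its own rate λ, while the engine is shared and comes from the context's generator. The same pattern in a standalone, Paddle-free sketch:

#include <iostream>
#include <random>

int main() {
  // The phi kernel pulls its engine from ctx.GetGenerator(); a fixed-seed
  // mt19937_64 stands in for it here.
  std::mt19937_64 engine(42);
  const double rates[] = {0.5, 1.0, 4.0};
  for (double lambda : rates) {
    std::poisson_distribution<> dist(lambda);  // result type defaults to int
    std::cout << "lambda=" << lambda << " -> " << dist(engine) << '\n';
  }
  return 0;
}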

paddle/phi/kernels/gpu/poisson_grad_kernel.cu (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/poisson_grad_kernel_impl.h"
PD_REGISTER_KERNEL(
poisson_grad, GPU, ALL_LAYOUT, phi::PoissonGradKernel, float, double) {}

paddle/phi/kernels/gpu/poisson_kernel.cu (was paddle/fluid/operators/poisson_op.cu):
@@ -1,4 +1,4 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,16 +18,20 @@ limitations under the License. */
 #ifdef __HIPCC__
 #include <hiprand_kernel.h>
 #endif
 
-#include "paddle/fluid/operators/poisson_op.h"
 #include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/poisson_kernel.h"
 
-namespace paddle {
-namespace operators {
+namespace phi {
 
 template <typename T>
 struct PoissonCudaFunctor {
  public:
-  PoissonCudaFunctor(const T* in, T* out, unsigned int seed,
+  PoissonCudaFunctor(const T* in,
+                     T* out,
+                     unsigned int seed,
                      unsigned int offset)
       : in_(in), out_(out), seed_(seed), offset_(offset) {}
@@ -50,42 +54,24 @@ struct PoissonCudaFunctor {
   const unsigned int offset_;
 };
 
-template <typename T>
-class PoissonKernel<platform::CUDADeviceContext, T>
-    : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    const auto* x = ctx.Input<framework::Tensor>("X");
-    auto* out = ctx.Output<framework::Tensor>("Out");
-
-    const T* x_data = x->data<T>();
-    T* out_data = out->mutable_data<T>(ctx.GetPlace());
-    auto size = x->numel();
-    int64_t device_id = ctx.GetPlace().GetDeviceId();
-
-    auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
-    auto seed_offset = gen_cuda->IncrementOffset(20);
-    uint64_t seed = seed_offset.first;
-    uint64_t offset = seed_offset.second;
-
-    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, size);
-
-    PoissonCudaFunctor<T> functor(x_data, out_data, seed, offset);
-    for_range(functor);
-  }
-};
+template <typename T, typename Context>
+void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
+  const T* x_data = x.data<T>();
+  T* out_data = ctx.template Alloc<T>(out);
+  auto size = x.numel();
+
+  auto gen_cuda = ctx.GetGenerator();
+  auto seed_offset = gen_cuda->IncrementOffset(20);
+  uint64_t seed = seed_offset.first;
+  uint64_t offset = seed_offset.second;
+
+  paddle::platform::ForRange<Context> for_range(ctx, size);
+
+  PoissonCudaFunctor<T> functor(x_data, out_data, seed, offset);
+  for_range(functor);
+}
 
-}  // namespace operators
-}  // namespace paddle
+}  // namespace phi
 
-namespace ops = paddle::operators;
-namespace plat = paddle::platform;
-
-REGISTER_OP_CUDA_KERNEL(poisson,
-                        ops::PoissonKernel<plat::CUDADeviceContext, float>,
-                        ops::PoissonKernel<plat::CUDADeviceContext, double>);
-
-REGISTER_OP_CUDA_KERNEL(
-    poisson_grad, ops::PoissonGradKernel<plat::CUDADeviceContext, float>,
-    ops::PoissonGradKernel<plat::CUDADeviceContext, double>);
+PD_REGISTER_KERNEL(
+    poisson, GPU, ALL_LAYOUT, phi::PoissonKernel, float, double) {}

paddle/phi/kernels/impl/poisson_grad_kernel_impl.h (was paddle/fluid/operators/poisson_op.h):
@@ -1,4 +1,4 @@
-// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,28 +14,16 @@
 
 #pragma once
 
-#include "paddle/fluid/framework/generator.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/operator.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
+#include "paddle/phi/kernels/poisson_grad_kernel.h"
 
-namespace paddle {
-namespace operators {
+namespace phi {
 
-template <typename DeviceContext, typename T>
-class PoissonKernel;
+template <typename T, typename Context>
+void PoissonGradKernel(const Context& ctx, DenseTensor* x_grad) {
+  ctx.template Alloc<T>(x_grad);
+  phi::funcs::SetConstant<Context, T> functor;
+  functor(ctx, x_grad, static_cast<T>(0));
+}
 
-template <typename DeviceContext, typename T>
-class PoissonGradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
-    dx->mutable_data<T>(ctx.GetPlace());
-    phi::funcs::SetConstant<DeviceContext, T> functor;
-    auto& dev_ctx = ctx.template device_context<DeviceContext>();
-    functor(dev_ctx, dx, static_cast<T>(0));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
+}  // namespace phi
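
A Poisson draw is a discrete sample, so the op defines the gradient of the output with respect to x as identically zero: the grad kernel only allocates x_grad and zero-fills it, and accordingly its new signature takes no forward tensors at all. Minimal standalone usage of the fill utility it relies on (cpu_ctx and grad are assumed to be an initialized phi::CPUContext and a shaped DenseTensor):

// phi::funcs::SetConstant writes one value into every element of a tensor.
// cpu_ctx and grad are assumed to be set up elsewhere.
phi::funcs::SetConstant<phi::CPUContext, float> set_zero;
set_zero(cpu_ctx, &grad, 0.0f);  // grad now holds all zeros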

paddle/phi/kernels/poisson_grad_kernel.h (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
template <typename T, typename Context>
void PoissonGradKernel(const Context& ctx, DenseTensor* x_grad);
} // namespace phi

paddle/phi/kernels/poisson_kernel.h (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out);
} // namespace phi

paddle/phi/ops/compat/poisson_sig.cc (new file):
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature PoissonGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("poisson_grad", {}, {}, {GradVarName("X")});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(poisson_grad, phi::PoissonGradOpArgumentMapping);
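
This mapping tells the phi dispatcher how the legacy poisson_grad op's variables feed the new kernel: no inputs, no attributes, and the gradient of X as the single output. The forward op registers no such function in this commit, presumably because its names already line up one-to-one; purely for illustration, a hypothetical forward mapping would take this shape (inputs, attributes, outputs):

// Hypothetical, for illustration only — not part of this commit.
KernelSignature PoissonOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature("poisson", {"X"}, {}, {"Out"});
}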