Unverified commit 324b6b71, authored by Chen Weihang, committed by GitHub

[Phi] Move assign value op kernel into phi (#40967)

* move assign value op kernel

* remove original op

* fix failed unittests

* remove infershape
Parent 3d5a27f0
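At a high level, the change below replaces the fluid-style registration (attributes read from an ExecutionContext by name, plus a hand-written InferShape) with a phi free-function kernel whose device context and attributes are explicit parameters, paired with an InferMeta functor. The following compilable toy sketch (not Paddle code; every type and name in it is a stand-in) illustrates that structural difference:

#include <iostream>
#include <string>
#include <vector>

// Stand-in for the fluid style: attributes are fetched by name at runtime
// from an execution-context object.
struct FakeExecutionContext {
  std::vector<int> shape_attr;
  std::vector<int> Attr(const std::string& /*name*/) const { return shape_attr; }
};

struct OldStyleAssignValueKernel {
  void Compute(const FakeExecutionContext& ctx) const {
    auto shape = ctx.Attr("shape");
    std::cout << "fluid-style kernel, rank = " << shape.size() << '\n';
  }
};

// Stand-in for the phi style: the device context and every attribute arrive
// as explicit, typed function parameters, so shape inference and kernel
// registration can be declared separately.
struct FakeCPUContext {};

template <typename Context>
void NewStyleAssignValueKernel(const Context& /*dev_ctx*/,
                               const std::vector<int>& shape) {
  std::cout << "phi-style kernel, rank = " << shape.size() << '\n';
}

int main() {
  OldStyleAssignValueKernel{}.Compute(FakeExecutionContext{{2, 3}});
  NewStyleAssignValueKernel(FakeCPUContext{}, {2, 3});
  return 0;
}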
@@ -16,6 +16,10 @@
#include <string>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/nullary.h"
namespace paddle {
namespace framework {
class InferShapeContext;
@@ -39,14 +43,6 @@ class AssignValueOp : public framework::OperatorWithKernel {
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::NotFound("Output(Out) of assign_op is not found."));
auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
ctx->SetOutputDim("Out", phi::make_ddim(shape));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
@@ -89,11 +85,10 @@ $$Out = values$$
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(assign_value, AssignValueInferShapeFunctor,
PD_INFER_META(phi::AssignValueInferMeta));
REGISTER_OPERATOR(
assign_value, ops::AssignValueOp, ops::AssignValueOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(assign_value, ops::AssignValueKernel<bool>,
ops::AssignValueKernel<int>,
ops::AssignValueKernel<float>,
ops::AssignValueKernel<int64_t>);
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
AssignValueInferShapeFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/assign_value_op.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(assign_value, ops::AssignValueKernel<bool>,
ops::AssignValueKernel<int>,
ops::AssignValueKernel<float>,
ops::AssignValueKernel<int64_t>);
@@ -16,6 +16,13 @@ limitations under the License. */
namespace phi {
void AssignValueInferMeta(const std::vector<int>& shape,
DataType dtype,
MetaTensor* out) {
out->set_dims(phi::make_ddim(shape));
out->set_dtype(dtype);
}
void CreateInferMeta(const ScalarArray& shape,
DataType dtype,
MetaTensor* out) {
......
@@ -30,6 +30,10 @@ namespace phi {
//
// The InferMeta Functions in this file are arranged in alphabetic order.
void AssignValueInferMeta(const std::vector<int>& shape,
DataType dtype,
MetaTensor* out);
void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
void CreateInferMetaBase(const std::vector<int64_t>& shape,
......
@@ -18,6 +18,8 @@
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/utils/optional.h"
#include "paddle/fluid/framework/tensor_util.h"
namespace phi {
template <typename Context>
@@ -42,6 +44,62 @@ void AssignArrayKernel(const Context& dev_ctx,
}
}
template <typename T, typename Context>
typename std::enable_if<std::is_same<T, bool>::value>::type CopyVecotorToTensor(
const Context& dev_ctx,
const std::vector<Scalar>& values,
DenseTensor* out) {
// If the attribute value dtype is vector<bool>, it will be converted to
// vector<int>. At the same time, we cannot use vector<bool> to hold the
// values, because C++ stores vector<bool> elements as bits rather than bytes.
std::vector<int> assign_values;
assign_values.reserve(values.size());
for (const auto& val : values) {
assign_values.emplace_back(val.to<int>());
}
paddle::framework::TensorFromVector(assign_values, dev_ctx, out);
// use a raw array instead of the vector
bool* array_ptr = new T[assign_values.size()];
for (unsigned int i = 0; i < assign_values.size(); i++) {
array_ptr[i] = static_cast<T>(assign_values[i]);
}
paddle::framework::TensorFromArray(
array_ptr, assign_values.size(), dev_ctx, out);
delete[] array_ptr;
}
template <typename T, typename Context>
typename std::enable_if<!std::is_same<T, bool>::value>::type
CopyVecotorToTensor(const Context& dev_ctx,
const std::vector<Scalar>& values,
DenseTensor* out) {
std::vector<T> assign_values;
assign_values.reserve(values.size());
for (const auto& val : values) {
assign_values.emplace_back(val.to<T>());
}
paddle::framework::TensorFromVector(assign_values, dev_ctx, out);
}
template <typename T, typename Context>
void AssignValueKernel(const Context& dev_ctx,
const std::vector<int>& shape,
DataType dtype,
const std::vector<Scalar>& values,
DenseTensor* out) {
auto template_dtype = paddle::experimental::CppTypeToDataType<T>::Type();
PADDLE_ENFORCE_EQ(
dtype,
template_dtype,
phi::errors::InvalidArgument("The argument dtype does not match the kernel "
"dtype: argument dtype is %s, kernel dtype is %s.",
dtype,
template_dtype));
CopyVecotorToTensor<T>(dev_ctx, values, out);
out->Resize(phi::make_ddim(shape));
}
} // namespace phi
PD_REGISTER_GENERAL_KERNEL(
@@ -51,6 +109,14 @@ PD_REGISTER_GENERAL_KERNEL(assign_array,
ALL_LAYOUT,
phi::AssignArrayKernel<phi::CPUContext>,
ALL_DTYPE) {}
PD_REGISTER_KERNEL(assign_value,
CPU,
ALL_LAYOUT,
phi::AssignValueKernel,
bool,
int,
float,
int64_t) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_REGISTER_GENERAL_KERNEL(
@@ -60,4 +126,12 @@ PD_REGISTER_GENERAL_KERNEL(assign_array,
ALL_LAYOUT,
phi::AssignArrayKernel<phi::GPUContext>,
ALL_DTYPE) {}
PD_REGISTER_KERNEL(assign_value,
GPU,
ALL_LAYOUT,
phi::AssignValueKernel,
bool,
int,
float,
int64_t) {}
#endif
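The CopyVecotorToTensor pair in the kernel above selects a bool-specific path with std::enable_if on the return type, and that path copies through a raw array because std::vector<bool> is a bit-packed specialization with no contiguous bool* storage. The standalone sketch below (not part of this commit; the Print helper merely stands in for TensorFromVector/TensorFromArray) shows the same dispatch in isolation:

#include <cstddef>
#include <iostream>
#include <type_traits>
#include <vector>

// Stand-in for TensorFromVector/TensorFromArray: prints from a raw pointer.
template <typename T>
void PrintFromPointer(const T* data, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) std::cout << data[i] << ' ';
  std::cout << '\n';
}

// Selected only when T is bool: copy through a heap-allocated array, because
// std::vector<bool> is bit-packed and exposes no contiguous bool* storage.
template <typename T>
typename std::enable_if<std::is_same<T, bool>::value>::type CopyValues(
    const std::vector<int>& values) {
  bool* array_ptr = new bool[values.size()];
  for (std::size_t i = 0; i < values.size(); ++i) {
    array_ptr[i] = static_cast<bool>(values[i]);
  }
  PrintFromPointer(array_ptr, values.size());
  delete[] array_ptr;
}

// Selected for every other T: the vector's own storage is contiguous.
template <typename T>
typename std::enable_if<!std::is_same<T, bool>::value>::type CopyValues(
    const std::vector<T>& values) {
  PrintFromPointer(values.data(), values.size());
}

int main() {
  CopyValues<bool>({1, 0, 1});      // bool path: prints "1 0 1"
  CopyValues<float>({1.5f, 2.5f});  // generic path: prints "1.5 2.5"
  return 0;
}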
@@ -14,6 +14,9 @@
#pragma once
#include <vector>
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
@@ -31,4 +34,11 @@ void AssignArrayKernel(const Context& dev_ctx,
const std::vector<const DenseTensor*>& x,
std::vector<DenseTensor*> out);
template <typename T, typename Context>
void AssignValueKernel(const Context& dev_ctx,
const std::vector<int>& shape,
DataType dtype,
const std::vector<Scalar>& values,
DenseTensor* out);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature AssignValueOpArgumentMapping(
const ArgumentMappingContext& ctx) {
// Here we must use the `dtype` attr to determine which values attr to use;
// we cannot judge by whether an attr is empty, since some unittests would fail.
int dtype = paddle::any_cast<int>(ctx.Attr("dtype"));
// Here we cannot depend on the fluid proto::VarType, so we use the dtype enum
// values directly. If an enum value is ever updated, this code also needs to
// be updated, but the probability of the enum values changing is very low.
if (dtype == /*BOOL*/ 0) {
return KernelSignature(
"assign_value", {}, {"shape", "dtype", "bool_values"}, {"Out"});
} else if (dtype == /*INT32*/ 2) {
return KernelSignature(
"assign_value", {}, {"shape", "dtype", "int32_values"}, {"Out"});
} else if (dtype == /*FP32*/ 5) {
return KernelSignature(
"assign_value", {}, {"shape", "dtype", "fp32_values"}, {"Out"});
} else if (dtype == /*INT64*/ 3) {
return KernelSignature(
"assign_value", {}, {"shape", "dtype", "int64_values"}, {"Out"});
} else {
return KernelSignature("unregistered", {}, {}, {});
}
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(assign_value, phi::AssignValueOpArgumentMapping);
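For reference, the argument mapping above keys off fixed enum values (BOOL = 0, INT32 = 2, INT64 = 3, FP32 = 5), as noted in its comments. A standalone sketch (illustration only, not Paddle code) of that dtype-to-attribute lookup:

#include <iostream>
#include <string>

// Returns the name of the values attribute that `assign_value` reads for a
// given dtype enum value; an empty string mirrors the fallback to the
// "unregistered" signature above.
std::string SelectValuesAttr(int dtype) {
  switch (dtype) {
    case 0: return "bool_values";   // BOOL
    case 2: return "int32_values";  // INT32
    case 3: return "int64_values";  // INT64
    case 5: return "fp32_values";   // FP32
    default: return "";
  }
}

int main() {
  std::cout << SelectValuesAttr(5) << '\n';  // prints "fp32_values"
  return 0;
}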