Unverified commit e559fe41, authored by zyfncg, committed by GitHub

[Phi] Rename ScalarArray to IntArray (#40975)

* rename scalar_array to int_array

* update cmake

* fix conflict

* remove useless log
Parent: 2d69abd2
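In effect this commit is a mechanical rename across the codebase: the ScalarArray/ScalarArrayBase types and the scalar_array.h header become IntArray/IntArrayBase and int_array.h, and helpers such as MakePhiScalarArrayFromVar become MakePhiIntArrayFromVar. A minimal before/after sketch of a call site (shape values illustrative only, assuming the phi headers are on the include path):

    // Before this commit:
    // #include "paddle/phi/common/scalar_array.h"
    // phi::ScalarArray shape({2, 3});

    // After this commit:
    #include "paddle/phi/common/int_array.h"
    phi::IntArray shape({2, 3});  // implicit construction from an int list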
@@ -320,7 +320,7 @@ EagerReducer::EagerReducer(
   if (find_unused_vars_each_step_) {
     global_used_vars_ = paddle::experimental::empty(
-        ScalarArray({static_cast<int32_t>(tensors_.size())}), DataType::INT32,
+        IntArray({static_cast<int32_t>(tensors_.size())}), DataType::INT32,
         inner_place_);
   }
 }
@@ -364,7 +364,7 @@ void EagerReducer::InitializeGroups(
     // process the dense gradient.
     InitializeDenseGroups(tensor_indices_, &group);
     group.dense_contents_ = paddle::experimental::empty(
-        ScalarArray({group.all_length_}), group.dtype_, inner_place_);
+        IntArray({group.all_length_}), group.dtype_, inner_place_);
   }
   // map tensors to this group by VariableLocator
@@ -403,7 +403,7 @@ void EagerReducer::InitializeDenseGroups(
     p_group->length_.push_back(size);
     // for concat operator
-    p_group->origin_shapes_.push_back(ScalarArray(tensor.shape()));
+    p_group->origin_shapes_.push_back(IntArray(tensor.shape()));
     p_group->dense_tensors_.push_back(phi::DenseTensor());
     const auto &dtype = tensor.dtype();
@@ -35,8 +35,8 @@ namespace paddle {
 namespace distributed {
 using Tensor = paddle::experimental::Tensor;
 using Scalar = paddle::experimental::ScalarBase<paddle::experimental::Tensor>;
-using ScalarArray =
-    paddle::experimental::ScalarArrayBase<paddle::experimental::Tensor>;
+using IntArray =
+    paddle::experimental::IntArrayBase<paddle::experimental::Tensor>;
 using Backend = paddle::experimental::Backend;
 std::vector<std::vector<size_t>> Eager_AssignGroupBySize(
@@ -52,7 +52,7 @@ class EagerGroup {
   std::vector<phi::DenseTensor> dense_tensors_;
   std::vector<int64_t> length_;
   int64_t all_length_{0};
-  std::vector<ScalarArray> origin_shapes_;
+  std::vector<IntArray> origin_shapes_;
   // Global indices of participating tensors in the group
   std::vector<size_t> tensor_indices_;
@@ -43,7 +43,7 @@ yaml_types_mapping = {
   'Scalar(int64_t)' : 'paddle::experimental::Scalar',
   'Scalar(float)' : 'paddle::experimental::Scalar',
   'Scalar(double)' : 'paddle::experimental::Scalar',
-  'ScalarArray' : 'paddle::experimental::ScalarArray'
+  'IntArray' : 'paddle::experimental::IntArray'
 }
@@ -45,7 +45,7 @@ atype_to_parsing_function = {
   "std::vector<double>": "CastPyArg2Float64s",
   "std::vector<std::string>": "CastPyArg2Strings",
   "paddle::experimental::Scalar": "CastPyArg2Scalar",
-  "paddle::experimental::ScalarArray": "CastPyArg2ScalarArray",
+  "paddle::experimental::IntArray": "CastPyArg2IntArray",
   "paddle::experimental::Place": "CastPyArg2Place",
   "paddle::experimental::DataType": "CastPyArg2DataType",
 }
@@ -140,7 +140,7 @@ PYTHON_C_WRAPPER_TEMPLATE = \
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/api/include/sparse_api.h"
 #include "paddle/phi/api/include/strings_api.h"
 #include "paddle/fluid/pybind/op_function_common.h"
@@ -21,8 +21,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/framework.pb.h"
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/compat/arg_map_context.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/compat/op_utils.h"
@@ -363,12 +363,12 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
   auto attr_reader = ctx->Attrs();
   for (size_t i = 0; i < attr_names.size(); ++i) {
     auto attr_name = attr_names[i];
-    if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
-      // When attr is a vector_tensor or tensor, transform it to ScalarArray
+    if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
+      // When attr is a vector_tensor or tensor, transform it to IntArray
       if (ctx->HasInputs(attr_name) || ctx->HasInput(attr_name)) {
         const auto& infershape_inputs = ctx->GetInputVarPtrs(attr_name);
         if (ctx->IsRuntime()) {
-          // If is in runtime, we will get tensor's value for ScalarArray
+          // If is in runtime, we will get tensor's value for IntArray
           // and push it into attrs
           std::vector<Variable*> vars;
           vars.reserve(infershape_inputs.size());
@@ -377,13 +377,13 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
         }
         if (infershape_inputs.size() != 1) {
           infer_meta_context.EmplaceBackAttr(
-              std::move(experimental::MakePhiScalarArrayFromVarList(vars)));
+              std::move(experimental::MakePhiIntArrayFromVarList(vars)));
         } else {
           infer_meta_context.EmplaceBackAttr(
-              std::move(experimental::MakePhiScalarArrayFromVar(*vars[0])));
+              std::move(experimental::MakePhiIntArrayFromVar(*vars[0])));
         }
       } else {
-        // If is not in runtime, we will set default value(-1) for ScalarArray
+        // If is not in runtime, we will set default value(-1) for IntArray
         std::vector<VarDesc*> vars;
         vars.reserve(infershape_inputs.size());
         for (size_t i = 0; i < infershape_inputs.size(); ++i) {
@@ -400,7 +400,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
           if (num_ele <= 0) {
             PADDLE_THROW(platform::errors::Unimplemented(
-                "Invalid number for construct phi::ScalarArray, expected "
+                "Invalid number for construct phi::IntArray, expected "
                 "number > 0, but actually is %d. ",
                 num_ele));
           }
@@ -408,7 +408,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
         } else {
           num_ele = vars.size();
         }
-        phi::ScalarArray tensor_attr(std::vector<int32_t>(num_ele, -1));
+        phi::IntArray tensor_attr(std::vector<int32_t>(num_ele, -1));
         tensor_attr.SetFromTensor(true);
         infer_meta_context.EmplaceBackAttr(std::move(tensor_attr));
       }
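To make the placeholder trick above concrete: at infershape (compile) time the shape values live in tensors that carry no data yet, so a stand-in IntArray of -1s is emplaced and flagged as tensor-backed. A standalone sketch of just that construction (num_ele is illustrative; only int_array.h is assumed):

    #include <cstdint>
    #include <vector>
    #include "paddle/phi/common/int_array.h"

    int main() {
      int64_t num_ele = 4;  // stands in for the element count derived above
      phi::IntArray tensor_attr(std::vector<int32_t>(num_ele, -1));  // every dim unknown
      tensor_attr.SetFromTensor(true);  // real values arrive from a tensor at runtime
      return tensor_attr.FromTensor() ? 0 : 1;
    }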
@@ -417,18 +417,18 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       if (std::type_index(attr.type()) ==
           std::type_index(typeid(std::vector<int32_t>))) {
         infer_meta_context.EmplaceBackAttr(std::move(
-            phi::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
+            phi::IntArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
       } else if (std::type_index(attr.type()) ==
                  std::type_index(typeid(std::vector<int64_t>))) {
         infer_meta_context.EmplaceBackAttr(std::move(
-            phi::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
+            phi::IntArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
       } else if (std::type_index(attr.type()) ==
                  std::type_index(typeid(int))) {
         infer_meta_context.EmplaceBackAttr(
-            phi::ScalarArray({BOOST_GET_CONST(int, attr)}));
+            phi::IntArray({BOOST_GET_CONST(int, attr)}));
       } else {
         PADDLE_THROW(platform::errors::Unimplemented(
-            "Unsupported cast op attribute `%s` to ScalarArray when "
+            "Unsupported cast op attribute `%s` to IntArray when "
             "construct InferMetaContext.",
             attr_name));
       }
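The fallback chain above mirrors the constructor forms the renamed type exposes. A hedged, standalone illustration of the same three attribute shapes (values are made up):

    #include <cstdint>
    #include <vector>
    #include "paddle/phi/common/int_array.h"

    int main() {
      phi::IntArray from_i32(std::vector<int32_t>{2, 3});  // vector<int32_t> attr
      phi::IntArray from_i64(std::vector<int64_t>{2, 3});  // vector<int64_t> attr
      phi::IntArray from_int({7});                         // single int attr
      return from_i32.GetData() == from_i64.GetData() ? 0 : 1;
    }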
@@ -33,8 +33,8 @@ limitations under the License. */
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/kernel_factory.h"
 #include "paddle/phi/ops/compat/signatures.h"
@@ -2198,24 +2198,24 @@ void OperatorWithKernel::BuildPhiKernelContext(
   VLOG(4) << "Done outputs";
   for (size_t i = 0; i < attr_names.size(); ++i) {
-    if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
+    if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
       auto attr_iter = Attrs().find(attr_names[i]);
       if (attr_iter != Attrs().end()) {  // shape is in the attribute
         if (std::type_index(attr_iter->second.type()) ==
             std::type_index(typeid(std::vector<int64_t>))) {
-          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
+          pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
               BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second))));
         } else if (std::type_index(attr_iter->second.type()) ==
                    std::type_index(typeid(std::vector<int32_t>))) {
-          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
+          pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
               BOOST_GET_CONST(std::vector<int32_t>, attr_iter->second))));
         } else if (std::type_index(attr_iter->second.type()) ==
                    std::type_index(typeid(int32_t))) {
-          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
-              &BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
+          pt_kernel_context->EmplaceBackAttr(std::move(
+              phi::IntArray(&BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
         } else {
           PADDLE_THROW(platform::errors::Unimplemented(
-              "Unsupported cast op attribute `%s` to ScalarArray when "
+              "Unsupported cast op attribute `%s` to IntArray when "
              "construct KernelContext.",
              attr_names[i]));
        }
@@ -2223,10 +2223,10 @@ void OperatorWithKernel::BuildPhiKernelContext(
         auto& ins_vector = ctx.inputs.at(attr_names[i]);
         if (ins_vector.size() == 1) {  // ShapeTensor
           pt_kernel_context->EmplaceBackAttr(std::move(
-              experimental::MakePhiScalarArrayFromVar(*ins_vector.front())));
+              experimental::MakePhiIntArrayFromVar(*ins_vector.front())));
         } else {  // ShapeTensorList
-          pt_kernel_context->EmplaceBackAttr(std::move(
-              experimental::MakePhiScalarArrayFromVarList(ins_vector)));
+          pt_kernel_context->EmplaceBackAttr(
+              std::move(experimental::MakePhiIntArrayFromVarList(ins_vector)));
         }
       }
     } else if (attr_defs[i].type_index ==
@@ -19,8 +19,8 @@
 #include "paddle/fluid/framework/details/nan_inf_utils.h"
 #include "paddle/fluid/imperative/infer_shape_context.h"
 #include "paddle/fluid/imperative/tracer.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/utils/small_vector.h"
 #ifdef PADDLE_WITH_XPU
 #include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
@@ -361,26 +361,26 @@ void BuildDygraphPhiKernelContext(
   }
   for (size_t i = 0; i < attr_names.size(); ++i) {
-    if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
+    if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
       if (attrs.find(attr_names[i]) !=
           attrs.end()) {  // shape is in the attribute
         auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
         if (std::type_index(attr.type()) ==
             std::type_index(typeid(std::vector<int64_t>))) {
           kernel_ctx->EmplaceBackAttr(std::move(
-              phi::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
+              phi::IntArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
         } else if (std::type_index(attr.type()) ==
                    std::type_index(typeid(std::vector<int32_t>))) {
           kernel_ctx->EmplaceBackAttr(std::move(
-              phi::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
+              phi::IntArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
         } else if (std::type_index(attr.type()) ==
                    std::type_index(typeid(int64_t))) {
           kernel_ctx->EmplaceBackAttr(
-              std::move(phi::ScalarArray(&BOOST_GET_CONST(int64_t, attr), 1)));
+              std::move(phi::IntArray(&BOOST_GET_CONST(int64_t, attr), 1)));
         } else if (std::type_index(attr.type()) ==
                    std::type_index(typeid(int32_t))) {
           kernel_ctx->EmplaceBackAttr(
-              std::move(phi::ScalarArray(&BOOST_GET_CONST(int32_t, attr), 1)));
+              std::move(phi::IntArray(&BOOST_GET_CONST(int32_t, attr), 1)));
         } else if (attr_defs[i].type_index ==
                    std::type_index(typeid(std::vector<int32_t>))) {
           const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
@@ -395,15 +395,15 @@ void BuildDygraphPhiKernelContext(
         auto& ins_vector = ins.at(attr_names[i]);
         if (ins_vector.size() == 1) {  // ShapeTensor
           kernel_ctx->EmplaceBackAttr(std::move(
-              experimental::MakePhiScalarArrayFromVar(ins_vector[0]->Var())));
+              experimental::MakePhiIntArrayFromVar(ins_vector[0]->Var())));
         } else {  // ShapeTensorList
           std::vector<framework::Variable*> variables;
           variables.reserve(ins_vector.size());
           for (const auto& var_base : ins_vector) {
             variables.push_back(var_base->MutableVar());
           }
-          kernel_ctx->EmplaceBackAttr(std::move(
-              experimental::MakePhiScalarArrayFromVarList(variables)));
+          kernel_ctx->EmplaceBackAttr(
+              std::move(experimental::MakePhiIntArrayFromVarList(variables)));
         }
       }
     } else if (attr_defs[i].type_index ==
@@ -21,7 +21,7 @@ limitations under the License. */
 // only can include the headers in paddle/phi/api dirs
 #include "paddle/phi/api/lib/utils/tensor_utils.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/unary.h"
@@ -354,7 +354,7 @@ class ReshapeKernel {
     auto *shape_tensor = ctx.HasInput("Shape")
                              ? ctx.Input<framework::LoDTensor>("Shape")
                              : nullptr;
-    phi::ScalarArray pt_scalar_shape;
+    phi::IntArray pt_scalar_shape;
     if (list_new_shape_tensor.size() > 0) {
       // have shape tensor
       std::vector<phi::DenseTensor> pt_vec_shape;
@@ -369,7 +369,7 @@ class ReshapeKernel {
           pt_vec_shape.push_back(*tensor);
         }
       }
-      pt_scalar_shape = phi::ScalarArray(pt_vec_shape);
+      pt_scalar_shape = phi::IntArray(pt_vec_shape);
     } else if (shape_tensor) {
       phi::DenseTensor pt_shape;
       if (platform::is_gpu_place(shape_tensor->place()) ||
@@ -381,10 +381,10 @@ class ReshapeKernel {
       } else {
         pt_shape = *shape_tensor;
       }
-      pt_scalar_shape = phi::ScalarArray(pt_shape);
+      pt_scalar_shape = phi::IntArray(pt_shape);
     } else {
       auto &shape_attr = ctx.Attr<std::vector<int>>("shape");
-      pt_scalar_shape = phi::ScalarArray(shape_attr);
+      pt_scalar_shape = phi::IntArray(shape_attr);
     }
     if (platform::is_cpu_place(ctx.GetPlace())) {
       auto &dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
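ReshapeKernel thus resolves the target shape from three sources in priority order: a list of shape tensors, a single Shape tensor, or the static shape attribute, and each path lands in one phi::IntArray. Only the attribute path can be shown standalone (the tensor paths need a live device context); the shape values are illustrative:

    #include <vector>
    #include "paddle/phi/common/int_array.h"

    int main() {
      std::vector<int> shape_attr = {0, -1, 28};  // Paddle-style reshape attribute
      phi::IntArray pt_scalar_shape = phi::IntArray(shape_attr);
      return static_cast<int>(pt_scalar_shape.GetData().size());
    }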
@@ -950,9 +950,10 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   return paddle::experimental::Scalar(1.0);
 }
-paddle::experimental::ScalarArray CastPyArg2ScalarArray(
-    PyObject* obj, const std::string& op_type, ssize_t arg_pos) {
-  // In case of ScalarArray, only two possible PyObjects:
+paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
+                                                  const std::string& op_type,
+                                                  ssize_t arg_pos) {
+  // In case of IntArray, only two possible PyObjects:
   // 1. list of int
   // 2. Tensor
   if (obj == Py_None) {
@@ -968,12 +969,12 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
   auto type_name = std::string(type->tp_name);
   if (type_name == "list" || type_name == "tuple") {
     std::vector<int> value = CastPyArg2Ints(obj, op_type, arg_pos);
-    return paddle::experimental::ScalarArray(value);
+    return paddle::experimental::IntArray(value);
   } else if (type_name == "paddle.Tensor") {
     paddle::experimental::Tensor& value = GetTensorFromPyObject(
         op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
-    return paddle::experimental::ScalarArray(value);
+    return paddle::experimental::IntArray(value);
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
@@ -983,8 +984,8 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
         ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
   }
-  // Fake a ScalarArray
-  return paddle::experimental::ScalarArray({1});
+  // Fake a IntArray
+  return paddle::experimental::IntArray({1});
 }
 paddle::framework::Scope* CastPyArg2ScopePtr(PyObject* obj) {
@@ -13,8 +13,8 @@ limitations under the License. */
 #include <Python.h>
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "pybind11/pybind11.h"
@@ -150,8 +150,9 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos);
-paddle::experimental::ScalarArray CastPyArg2ScalarArray(
-    PyObject* obj, const std::string& op_type, ssize_t arg_pos);
+paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
+                                                  const std::string& op_type,
+                                                  ssize_t arg_pos);
 paddle::experimental::Place CastPyArg2Place(PyObject* obj,
                                             const std::string& op_type,
@@ -38,9 +38,9 @@
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/meta_tensor.h"
@@ -90,7 +90,7 @@ using ValueVariantType =
                  std::vector<const ::phi::DenseTensor*>,
                  std::vector<::phi::DenseTensor*>,
                  paddle::experimental::ScalarBase<::phi::DenseTensor>,
-                 paddle::experimental::ScalarArrayBase<::phi::DenseTensor>,
+                 paddle::experimental::IntArrayBase<::phi::DenseTensor>,
                  std::vector<::phi::MetaTensor*>,
                  ::phi::MetaConfig,
                  paddle::experimental::Backend,
@@ -32,9 +32,9 @@ limitations under the License. */
 // phi common headers
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 // original custom op headers
 #include "paddle/phi/api/ext/dispatch.h"
@@ -64,7 +64,7 @@ Tensor copy_to_impl(const Tensor& x, Place place, bool blocking) {
 }
 std::vector<Tensor> split_impl(const Tensor& x,
-                               const ScalarArray& num_or_sections,
+                               const IntArray& num_or_sections,
                                const Scalar& axis) {
   auto kernel_key_set = ParseKernelKeyByInputArgs(x);
   auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
@@ -107,13 +107,13 @@ std::vector<Tensor> split_impl(const Tensor& x,
   using kernel_signature = void (*)(const platform::DeviceContext&,
                                     const phi::DenseTensor&,
-                                    const phi::ScalarArray&,
+                                    const phi::IntArray&,
                                     const phi::Scalar&,
                                     std::vector<phi::DenseTensor*>&);
   auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
   (*kernel_fn)(*dev_ctx,
                *dense_x,
-               phi::ScalarArray(num_or_sections),
+               phi::IntArray(num_or_sections),
                phi::Scalar(axis),
               dense_outs);
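Note that num_or_sections is a single IntArray either way: a list of section sizes or a one-element section count. A hedged sketch of the two spellings (sizes invented for illustration):

    #include "paddle/phi/common/int_array.h"

    int main() {
      phi::IntArray sections({2, 2, 4});  // split into pieces of these sizes
      phi::IntArray num({3});             // or: split into 3 equal pieces
      return static_cast<int>(sections.GetData().size());
    }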
@@ -15,9 +15,9 @@ limitations under the License. */
 #pragma once
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 namespace paddle {
 namespace experimental {
@@ -25,7 +25,7 @@ namespace experimental {
 Tensor copy_to_impl(const Tensor& x, Place place, bool blocking);
 std::vector<Tensor> split_impl(const Tensor& x,
-                               const ScalarArray& num_or_sections,
+                               const IntArray& num_or_sections,
                                const Scalar& axis);
 }  // namespace experimental
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/api/lib/ext_compat_utils.h"
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/tensor_base.h"
@@ -62,35 +62,34 @@ phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable) {
   }
 }
-phi::ScalarArray MakePhiScalarArray(const paddle::framework::Tensor& src) {
+phi::IntArray MakePhiIntArray(const paddle::framework::Tensor& src) {
   return {src};
 }
-phi::ScalarArray MakePhiScalarArrayFromVar(
-    const framework::Variable& variable) {
+phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable) {
   auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU);
   if (variable.IsType<framework::LoDTensor>()) {
     const auto& tensor = variable.Get<framework::LoDTensor>();
     if (!platform::is_same_place(tensor.place(), expected_place)) {
       framework::LoDTensor tmp_tensor;
       framework::TensorCopySync(tensor, expected_place, &tmp_tensor);
-      return MakePhiScalarArray(tmp_tensor);
+      return MakePhiIntArray(tmp_tensor);
     } else {
-      return MakePhiScalarArray(tensor);
+      return MakePhiIntArray(tensor);
     }
   } else {
     PADDLE_THROW(platform::errors::Unimplemented(
-        "Unsupport casting input `%s` type to ScalarArray when call pt "
+        "Unsupport casting input `%s` type to IntArray when call pt "
        "kernel.",
        framework::ToTypeName(variable.Type())));
  }
}
-// TODO(chentianyu03): Inplace with ScalarArray constructor
-phi::ScalarArray MakePhiScalarArrayFromVarList(
+// TODO(chentianyu03): Inplace with IntArray constructor
+phi::IntArray MakePhiIntArrayFromVarList(
     const std::vector<framework::Variable*>& variable_list) {
   if (variable_list.size() == 0) {
-    return phi::ScalarArray();
+    return phi::IntArray();
   }
   auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU);
@@ -137,7 +136,7 @@ phi::ScalarArray MakePhiScalarArrayFromVarList(
     }
   }
-  phi::ScalarArray result{vector_data};
+  phi::IntArray result{vector_data};
   result.SetFromTensor(true);
   return result;
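The tail of MakePhiIntArrayFromVarList, sketched standalone: one integer is gathered per shape variable, folded into a single IntArray, and flagged as tensor-backed (vector_data stands in for the gathered values):

    #include <cstdint>
    #include <vector>
    #include "paddle/phi/common/int_array.h"

    int main() {
      std::vector<int64_t> vector_data = {4, 8, 16};  // one value per shape variable
      phi::IntArray result{vector_data};
      result.SetFromTensor(true);  // shape originated in tensors, not static attrs
      return result.FromTensor() ? 0 : 1;
    }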
@@ -21,8 +21,8 @@ limitations under the License. */
 #include "paddle/phi/api/lib/utils/allocator.h"
 #include "paddle/phi/api/lib/utils/storage.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_factory.h"
@@ -33,13 +33,13 @@ namespace experimental {
 std::unique_ptr<phi::DenseTensor> MakePhiDenseTensor(
     const paddle::framework::Tensor& src);
-phi::ScalarArray MakePhiScalarArray(const paddle::framework::Tensor& src);
+phi::IntArray MakePhiIntArray(const paddle::framework::Tensor& src);
 phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable);
-phi::ScalarArray MakePhiScalarArrayFromVar(const framework::Variable& variable);
+phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable);
-phi::ScalarArray MakePhiScalarArrayFromVarList(
+phi::IntArray MakePhiIntArrayFromVarList(
     const std::vector<framework::Variable*>& variable_list);
 }  // namespace experimental
@@ -21,25 +21,25 @@ namespace paddle {
 namespace experimental {
 template <typename T>
-class ScalarArrayBase {
+class IntArrayBase {
  public:
   // Constructor support implicit
-  ScalarArrayBase() = default;
+  IntArrayBase() = default;
-  ScalarArrayBase(const std::vector<int64_t>& vec) : array_(vec) {}  // NOLINT
+  IntArrayBase(const std::vector<int64_t>& vec) : array_(vec) {}  // NOLINT
-  ScalarArrayBase(const std::vector<int32_t>& vec) {  // NOLINT
+  IntArrayBase(const std::vector<int32_t>& vec) {  // NOLINT
     array_.insert(array_.begin(), vec.begin(), vec.end());
   }
-  ScalarArrayBase(std::initializer_list<int64_t> array_list)
+  IntArrayBase(std::initializer_list<int64_t> array_list)
       : array_(array_list) {}
-  ScalarArrayBase(const int64_t* date_value, int64_t n) {
+  IntArrayBase(const int64_t* date_value, int64_t n) {
     AssignData(date_value, n);
   }
-  ScalarArrayBase(const int32_t* date_value, int64_t n) {
+  IntArrayBase(const int32_t* date_value, int64_t n) {
     AssignData(date_value, n);
   }
@@ -48,7 +48,7 @@ class ScalarArrayBase {
   void SetFromTensor(bool val) { is_from_tensor_ = val; }
   // The Tensor must have one dim
-  ScalarArrayBase(const T& tensor) {  // NOLINT
+  IntArrayBase(const T& tensor) {  // NOLINT
     is_from_tensor_ = true;
     size_t n = tensor.numel();
     array_.reserve(n);
@@ -61,7 +61,7 @@ class ScalarArrayBase {
         break;
       default:
         PD_THROW(
-            "Data type error. Currently, The data type of ScalarArrayBase "
+            "Data type error. Currently, The data type of IntArrayBase "
            "only supports Tensor with int32 and int64, "
            "but now received `",
            tensor.dtype(),
@@ -70,7 +70,7 @@ }
   }
   // The Tensor in vec must have only one element
-  ScalarArrayBase(const std::vector<T>& tensor_list) {  // NOLINT
+  IntArrayBase(const std::vector<T>& tensor_list) {  // NOLINT
     is_from_tensor_ = true;
     for (size_t i = 0; i < tensor_list.size(); ++i) {
@@ -84,7 +84,7 @@ class ScalarArrayBase {
           break;
         default:
           PD_THROW(
-              "Data type error. Currently, The data type of ScalarArrayBase "
+              "Data type error. Currently, The data type of IntArrayBase "
              "only supports Tensor with int32 and int64, "
              "but now received `",
              data_type,
@@ -94,8 +94,7 @@ class ScalarArrayBase {
   }
   template <typename OtherT>
-  ScalarArrayBase(const ScalarArrayBase<OtherT>& other)
-      : array_(other.GetData()) {}
+  IntArrayBase(const IntArrayBase<OtherT>& other) : array_(other.GetData()) {}
   const std::vector<int64_t>& GetData() const { return array_; }
@@ -120,8 +119,8 @@ class ScalarArrayBase {
   bool is_from_tensor_{false};
 };
-using ScalarArray =
-    paddle::experimental::ScalarArrayBase<paddle::experimental::Tensor>;
+using IntArray =
+    paddle::experimental::IntArrayBase<paddle::experimental::Tensor>;
 }  // namespace experimental
 }  // namespace paddle
@@ -129,6 +128,6 @@ using ScalarArray =
 namespace phi {
 class DenseTensor;
-using ScalarArray = paddle::experimental::ScalarArrayBase<DenseTensor>;
+using IntArray = paddle::experimental::IntArrayBase<DenseTensor>;
 }  // namespace phi
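A quick tour of the non-tensor constructors the renamed class keeps (standalone; values are arbitrary):

    #include <cstdint>
    #include <vector>
    #include "paddle/phi/common/int_array.h"

    int main() {
      std::vector<int64_t> v64 = {1, 2};
      std::vector<int32_t> v32 = {3, 4};
      int64_t raw[] = {5, 6};
      phi::IntArray a(v64);     // from vector<int64_t>
      phi::IntArray b(v32);     // from vector<int32_t>, widened to int64_t
      phi::IntArray c({7, 8});  // from an initializer list
      phi::IntArray d(raw, 2);  // from pointer + length
      return static_cast<int>(a.GetData().size() + b.GetData().size() +
                              c.GetData().size() + d.GetData().size());
    }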
@@ -19,8 +19,8 @@ limitations under the License. */
 #include <typeinfo>
 #include <utility>
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/macros.h"
 #include "paddle/phi/core/meta_tensor.h"
@@ -192,7 +192,7 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(Backend);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataLayout);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const Scalar&);
-  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
+  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const IntArray&);
   // TODO(chenweihang): support vector<MetaTensor> input later
@@ -18,8 +18,8 @@
 #include "paddle/phi/backends/custom/custom_context.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_context.h"
@@ -250,7 +250,7 @@ struct KernelImpl<Return (*)(DevCtx, Args...), kernel_fn> {
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataLayout);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(Place);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int64_t>&);
-  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
+  PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const IntArray&);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::string&);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
@@ -23,9 +23,7 @@ void AssignValueInferMeta(const std::vector<int>& shape,
   out->set_dtype(dtype);
 }
-void CreateInferMeta(const ScalarArray& shape,
-                     DataType dtype,
-                     MetaTensor* out) {
+void CreateInferMeta(const IntArray& shape, DataType dtype, MetaTensor* out) {
   CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
 }
@@ -48,7 +46,7 @@ void EyeInferMeta(int64_t num_rows,
   out->set_dtype(dtype);
 }
-void GaussianRandomInferMeta(const ScalarArray& shape,
+void GaussianRandomInferMeta(const IntArray& shape,
                              float mean,
                              float std,
                              int seed,
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/meta_tensor.h"
 namespace phi {
@@ -34,7 +34,7 @@ void AssignValueInferMeta(const std::vector<int>& shape,
                           DataType dtype,
                           MetaTensor* out);
-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
+void CreateInferMeta(const IntArray& shape, DataType dtype, MetaTensor* out);
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
@@ -46,7 +46,7 @@ void EyeInferMeta(int64_t num_rows,
                   DataType dtype,
                   MetaTensor* out);
-void GaussianRandomInferMeta(const ScalarArray& shape,
+void GaussianRandomInferMeta(const IntArray& shape,
                              float mean,
                              float std,
                              int seed,
@@ -16,7 +16,7 @@ limitations under the License. */
 namespace phi {
 namespace strings {
-void CreateInferMeta(const ScalarArray& shape, MetaTensor* out) {
+void CreateInferMeta(const IntArray& shape, MetaTensor* out) {
   const auto& out_dims = phi::make_ddim(shape.GetData());
   out->set_dims(out_dims);
   out->set_dtype(DataType::PSTRING);
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/core/tensor_meta.h"
@@ -22,7 +22,7 @@ namespace phi {
 namespace strings {
 void CreateInferMeta(const std::vector<int64_t>& shape, MetaTensor* out);
-void CreateInferMeta(const ScalarArray& shape, MetaTensor* out);
+void CreateInferMeta(const IntArray& shape, MetaTensor* out);
 }  // namespace strings
 }  // namespace phi
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 // See Note [ Why still include the fluid headers? ]
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/core/tensor_meta.h"
@@ -1173,7 +1173,7 @@ void PadInferMeta(const MetaTensor& input,
 }
 void Pad3dInferMeta(const MetaTensor& x,
-                    const ScalarArray& paddings_scalar_array,
+                    const IntArray& paddings_int_array,
                     const std::string& mode,
                     float value,
                     const std::string& data_format,
@@ -1189,21 +1189,21 @@ void Pad3dInferMeta(const MetaTensor& x,
   std::vector<int64_t> out_dims(x_dim.size());
   out_dims[0] = x_dim[0];
-  if (paddings_scalar_array.FromTensor()) {
+  if (paddings_int_array.FromTensor()) {
     if (config.is_runtime) {
       PADDLE_ENFORCE_EQ(
-          paddings_scalar_array.GetData().size(),
+          paddings_int_array.GetData().size(),
           6,
           errors::InvalidArgument("Shape of Input(Paddings) should be equal to "
                                   "[6], but received [%d].",
-                                  paddings_scalar_array.GetData().size()));
+                                  paddings_int_array.GetData().size()));
     }
     out_dims[1] = x_dim[1];
     out_dims[2] = x_dim[2];
     out_dims[3] = x_dim[3];
     out_dims[4] = x_dim[4];
   } else {
-    auto paddings = paddings_scalar_array.GetData();
+    auto paddings = paddings_int_array.GetData();
     PADDLE_ENFORCE_EQ(
         paddings.size(),
@@ -1592,7 +1592,7 @@ void ReduceInferMetaBase(const MetaTensor& x,
 }
 void ReshapeInferMeta(const MetaTensor& x,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       MetaTensor* out,
                       MetaConfig config) {
   auto& shape_data = shape.GetData();
@@ -1612,7 +1612,7 @@ void ReshapeInferMeta(const MetaTensor& x,
 }
 void ReshapeWithXShapeInferMeta(const MetaTensor& x,
-                                const ScalarArray& shape,
+                                const IntArray& shape,
                                 MetaTensor* out,
                                 MetaTensor* xshape,
                                 MetaConfig config) {
@@ -1659,7 +1659,7 @@ void ReverseInferMeta(const MetaTensor& x,
 }
 void RollInferMeta(const MetaTensor& x,
-                   const ScalarArray& shifts,
+                   const IntArray& shifts,
                    const std::vector<int64_t>& axis,
                    MetaTensor* out) {
   auto shifts_data = shifts.GetData();
@@ -1758,7 +1758,7 @@ void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
 }
 void SplitInferMeta(const MetaTensor& x,
-                    const ScalarArray& num_or_sections,
+                    const IntArray& num_or_sections,
                     const Scalar& axis,
                     std::vector<MetaTensor*> out,
                     MetaConfig config) {
@@ -1924,9 +1924,9 @@ void SqueezeInferMeta(const MetaTensor& x,
 void StridedSliceInferMeta(const MetaTensor& x,
                            const std::vector<int>& axes,
-                           const ScalarArray& starts,
-                           const ScalarArray& ends,
-                           const ScalarArray& strides,
+                           const IntArray& starts,
+                           const IntArray& ends,
+                           const IntArray& strides,
                            const std::vector<int>& infer_flags,
                            const std::vector<int>& decrease_axis,
                            MetaTensor* out,
@@ -1968,7 +1968,7 @@ void StridedSliceInferMeta(const MetaTensor& x,
   }
   auto tensor_input = false;
-  auto HasInput = [](const ScalarArray& arr) { return arr.FromTensor(); };
+  auto HasInput = [](const IntArray& arr) { return arr.FromTensor(); };
   if (HasInput(starts) || HasInput(ends) || HasInput(strides)) {
     tensor_input = true;
   }
@@ -2090,7 +2090,7 @@ void SumRawInferMeta(const MetaTensor& x,
 }
 void TileInferMeta(const MetaTensor& x,
-                   const ScalarArray& repeat_times,
+                   const IntArray& repeat_times,
                    MetaTensor* out,
                    MetaConfig config) {
 #define MAX_RANK_SUPPORTED 6
@@ -2553,7 +2553,7 @@ void UnfoldInferMeta(const MetaTensor& x,
 }
 void UnsqueezeInferMeta(const MetaTensor& x,
-                        const ScalarArray& axes,
+                        const IntArray& axes,
                         MetaTensor* xshape,
                         MetaTensor* out,
                         MetaConfig config) {
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 // See Note [ Why still include the fluid headers? ]
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/meta_tensor.h"
 namespace phi {
@@ -185,7 +185,7 @@ void PadInferMeta(const MetaTensor& input,
                   MetaConfig config = MetaConfig());
 void Pad3dInferMeta(const MetaTensor& x,
-                    const ScalarArray& paddings,
+                    const IntArray& paddings,
                     const std::string& mode,
                     float value,
                     const std::string& data_format,
@@ -238,12 +238,12 @@ void ReduceInferMetaBase(const MetaTensor& x,
                          MetaTensor* out);
 void ReshapeInferMeta(const MetaTensor& x,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       MetaTensor* out,
                       MetaConfig config = MetaConfig());
 void ReshapeWithXShapeInferMeta(const MetaTensor& x,
-                                const ScalarArray& shape,
+                                const IntArray& shape,
                                 MetaTensor* out,
                                 MetaTensor* xshape,
                                 MetaConfig config = MetaConfig());
@@ -253,7 +253,7 @@ void ReverseInferMeta(const MetaTensor& x,
                       MetaTensor* out);
 void RollInferMeta(const MetaTensor& x,
-                   const ScalarArray& shifts,
+                   const IntArray& shifts,
                    const std::vector<int64_t>& axis,
                    MetaTensor* out);
@@ -274,7 +274,7 @@ void SizeInferMeta(const MetaTensor& input, MetaTensor* out);
 void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);
 void SplitInferMeta(const MetaTensor& x_meta,
-                    const ScalarArray& num_or_sections,
+                    const IntArray& num_or_sections,
                     const Scalar& axis,
                     std::vector<MetaTensor*> out,
                     MetaConfig config = MetaConfig());
@@ -286,9 +286,9 @@ void SqueezeInferMeta(const MetaTensor& x,
 void StridedSliceInferMeta(const MetaTensor& x,
                            const std::vector<int>& axes,
-                           const ScalarArray& starts,
-                           const ScalarArray& ends,
-                           const ScalarArray& strides,
+                           const IntArray& starts,
+                           const IntArray& ends,
+                           const IntArray& strides,
                            const std::vector<int>& infer_flags,
                            const std::vector<int>& decrease_axis,
                            MetaTensor* out,
@@ -308,7 +308,7 @@ void SumRawInferMeta(const MetaTensor& x,
                      MetaTensor* out);
 void TileInferMeta(const MetaTensor& x,
-                   const ScalarArray& repeat_times,
+                   const IntArray& repeat_times,
                    MetaTensor* out,
                    MetaConfig config = MetaConfig());
@@ -361,7 +361,7 @@ void UnfoldInferMeta(const MetaTensor& x,
                      MetaConfig config = MetaConfig());
 void UnsqueezeInferMeta(const MetaTensor& x,
-                        const ScalarArray& axes,
+                        const IntArray& axes,
                         MetaTensor* xshape,
                         MetaTensor* out,
                         MetaConfig config = MetaConfig());
@@ -31,7 +31,7 @@ void FullValue(const Context& dev_ctx, DenseTensor* tensor, VType val) {
 template <typename T, typename Context>
 void FullKernel(const Context& dev_ctx,
-                const ScalarArray& shape,
+                const IntArray& shape,
                 const Scalar& val,
                 DataType dtype,
                 DenseTensor* out) {
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void GaussianRandomKernel(const Context& dev_ctx,
-                          const ScalarArray& shape,
+                          const IntArray& shape,
                           float mean,
                           float std,
                           int seed,
@@ -362,7 +362,7 @@ template <typename T, typename Context>
 void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& out_grad,
-                     const ScalarArray& paddings,
+                     const IntArray& paddings,
                      const std::string& mode,
                      float pad_value,
                      const std::string& data_format,
@@ -379,7 +379,7 @@ void Pad3DNDHWC(const T* in_data,
 template <typename T, typename Context>
 void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const ScalarArray& paddings,
+                 const IntArray& paddings,
                  const std::string& mode,
                  float pad_value,
                  const std::string& data_format,
@@ -25,7 +25,7 @@ template <typename T, typename Context>
 void RandintRawKernel(const Context& dev_ctx,
                       int low,
                       int high,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       DataType dtype,
                       int seed,
                       DenseTensor* out) {
@@ -49,7 +49,7 @@ template <typename T, typename Context>
 void RandintKernel(const Context& dev_ctx,
                    int low,
                    int high,
-                   const ScalarArray& shape,
+                   const IntArray& shape,
                    DataType dtype,
                    DenseTensor* out) {
   RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
@@ -24,7 +24,7 @@ template <typename T, typename Context>
 void RollGradKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& out_grad,
-                    const ScalarArray& shifts,
+                    const IntArray& shifts,
                     const std::vector<int64_t>& axis,
                     DenseTensor* x_grad) {
   std::vector<T> out_vec;
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void RollKernel(const Context& dev_ctx,
                 const DenseTensor& x,
-                const ScalarArray& shifts,
+                const IntArray& shifts,
                 const std::vector<int64_t>& axis,
                 DenseTensor* out) {
   std::vector<T> out_vec;
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 namespace phi {
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void SplitKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const ScalarArray& num_or_sections,
+                 const IntArray& num_or_sections,
                  const Scalar& axis_scalar,
                  std::vector<DenseTensor*> outs) {
   // need to infershape output
@@ -41,14 +41,14 @@ void TriangularSolveKernel(const Context& dev_ctx,
   int y_bst_ndim = y_bst_dims_vec.size();
   // Tensor broadcast to 'out' and temp 'x_bst'
-  ScalarArray x_bst_dims(x_bst_dims_vec);
+  IntArray x_bst_dims(x_bst_dims_vec);
   DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
   const T* x_bst_data = x_bst.data<T>();
   ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
   out->Resize(phi::make_ddim(y_bst_dims_vec));
   T* out_data = dev_ctx.template Alloc<T>(out);
-  ScalarArray y_bst_dims(y_bst_dims_vec);
+  IntArray y_bst_dims(y_bst_dims_vec);
   ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
   // Calculate use blas library
@@ -44,7 +44,7 @@ inline void UniformRealDistribution(phi::dtype::bfloat16 *data,
 template <typename T, typename Context>
 void UniformRandomRawKernel(const Context &dev_ctx,
-                            const ScalarArray &shape,
+                            const IntArray &shape,
                             DataType dtype,
                             float min,
                             float max,
@@ -86,7 +86,7 @@ void UniformRandomRawKernel(const Context &dev_ctx,
 template <typename T, typename Context>
 void UniformRandomKernel(const Context &dev_ctx,
-                         const ScalarArray &shape,
+                         const IntArray &shape,
                          DataType dtype,
                          float min,
                          float max,
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void EmptyKernel(const Context& dev_ctx,
-                 const ScalarArray& shape,
+                 const IntArray& shape,
                  DataType dtype,
                  DenseTensor* out) {
   out->Resize(phi::make_ddim(shape.GetData()));
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/device_context.h"
 #include "paddle/phi/infermeta/nullary.h"
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void EmptyKernel(const Context& dev_ctx,
-                 const ScalarArray& shape,
+                 const IntArray& shape,
                  DataType dtype,
                  DenseTensor* out);
@@ -43,7 +43,7 @@ DenseTensor Empty(const Context& dev_ctx, DenseTensorMeta&& meta) {
 }
 template <typename T, typename Context>
-DenseTensor Empty(const Context& dev_ctx, const ScalarArray& shape) {
+DenseTensor Empty(const Context& dev_ctx, const IntArray& shape) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
   DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
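With the rename, a kernel-side allocation reads as follows; a hedged sketch (CPU context and float dtype chosen for illustration, header paths assumed):

    #include "paddle/phi/backends/cpu/cpu_context.h"
    #include "paddle/phi/common/int_array.h"
    #include "paddle/phi/kernels/empty_kernel.h"

    void Demo(const phi::CPUContext& dev_ctx) {
      // Allocate an uninitialized 2x3 float tensor; IntArray carries the shape.
      phi::DenseTensor out =
          phi::Empty<float, phi::CPUContext>(dev_ctx, phi::IntArray({2, 3}));
      (void)out;
    }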
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/device_context.h"
@@ -24,7 +24,7 @@ template <typename T, typename Context>
 void ExpandGradKernel(const Context& ctx,
                       const DenseTensor& x,
                       const DenseTensor& out_grad,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       DenseTensor* in_grad);
 }  // namespace phi
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/device_context.h"
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void ExpandKernel(const Context& ctx,
                   const DenseTensor& x,
-                  const ScalarArray& shape,
+                  const IntArray& shape,
                   DenseTensor* out);
 }  // namepsace phi
@@ -16,8 +16,8 @@
 #include <vector>
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/infermeta/nullary.h"
@@ -27,7 +27,7 @@ namespace phi {
 template <typename T, typename Context>
 void FullKernel(const Context& dev_ctx,
-                const ScalarArray& shape,
+                const IntArray& shape,
                 const Scalar& val,
                 DataType dtype,
                 DenseTensor* out);
@@ -53,7 +53,7 @@ void FullBatchSizeLikeKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void Full(const Context& dev_ctx,
-          const ScalarArray& shape,
+          const IntArray& shape,
           const Scalar& val,
           DenseTensor* out) {
   FullKernel<T, Context>(dev_ctx,
@@ -65,7 +65,7 @@ void Full(const Context& dev_ctx,
 template <typename T, typename Context>
 DenseTensor Full(const Context& dev_ctx,
-                 const ScalarArray& shape,
+                 const IntArray& shape,
                  const Scalar& val) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
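And the value-filling counterpart, under the same assumptions (fill value and shape are illustrative):

    #include "paddle/phi/backends/cpu/cpu_context.h"
    #include "paddle/phi/common/int_array.h"
    #include "paddle/phi/common/scalar.h"
    #include "paddle/phi/kernels/full_kernel.h"

    void Demo(const phi::CPUContext& dev_ctx) {
      // A 4x4 tensor of ones; IntArray is the shape, Scalar the fill value.
      phi::DenseTensor ones = phi::Full<float, phi::CPUContext>(
          dev_ctx, phi::IntArray({4, 4}), phi::Scalar(1.0f));
      (void)ones;
    }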
@@ -414,7 +414,7 @@ void SelectKernel(const KPDevice &dev_ctx,
   // 1.2 alloc tmp data for CoutBlock
   const int size_count_block = need_grids + 1;
   std::vector<int> dims_vec = {size_count_block * 2};
-  ScalarArray dims_array(dims_vec);
+  IntArray dims_array(dims_vec);
   DenseTensor count_mem = phi::Empty<CT, KPDevice>(dev_ctx, dims_array);
   CT *count_data = count_mem.data<CT>();
   // 1.3 launch CountKernl
@@ -178,9 +178,9 @@ template <typename Context, typename T, size_t D>
 void StridedSliceCompute(const Context& dev_ctx,
                          const DenseTensor& x,
                          const std::vector<int>& axes,
-                         const ScalarArray& starts,
-                         const ScalarArray& ends,
-                         const ScalarArray& strides,
+                         const IntArray& starts,
+                         const IntArray& ends,
+                         const IntArray& strides,
                          const std::vector<int>& infer_flags,
                          const std::vector<int>& decrease_axis,
                          DenseTensor* out) {
@@ -295,9 +295,9 @@ template <typename Context, typename T, size_t D>
 void StridedSliceCompute(const Context& dev_ctx,
                          const std::vector<const DenseTensor*>& x,
                          const std::vector<int>& axes,
-                         const ScalarArray& starts,
-                         const ScalarArray& ends,
-                         const ScalarArray& strides,
+                         const IntArray& starts,
+                         const IntArray& ends,
+                         const IntArray& strides,
                          const std::vector<int>& infer_flags,
                          const std::vector<int>& decrease_axis,
                          std::vector<DenseTensor*> out) {
@@ -446,9 +446,9 @@ void StridedSliceGradCompute(const Context& dev_ctx,
                              const DenseTensor& x,
                              const DenseTensor& out_grad,
                              const std::vector<int>& axes,
-                             const ScalarArray& starts,
-                             const ScalarArray& ends,
-                             const ScalarArray& strides,
+                             const IntArray& starts,
+                             const IntArray& ends,
+                             const IntArray& strides,
                              const std::vector<int>& infer_flags,
                              const std::vector<int>& decrease_axis,
                              DenseTensor* x_grad) {
@@ -530,9 +530,9 @@ void StridedSliceGradCompute(const Context& dev_ctx,
                              const std::vector<const DenseTensor*>& x,
                              const std::vector<const DenseTensor*>& out_grad,
                              const std::vector<int>& axes,
-                             const ScalarArray& starts,
-                             const ScalarArray& ends,
-                             const ScalarArray& strides,
+                             const IntArray& starts,
+                             const IntArray& ends,
+                             const IntArray& strides,
                              const std::vector<int>& infer_flags,
                              const std::vector<int>& decrease_axis,
                              std::vector<DenseTensor*> x_grad) {
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/device_context.h"
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void GaussianRandomKernel(const Context& ctx,
-                          const ScalarArray& shape,
+                          const IntArray& shape,
                           float mean,
                           float std,
                           int seed,
@@ -35,7 +35,7 @@ struct FullFuctor {
 template <typename T, typename Context>
 void FullKernel(const Context& dev_ctx,
-                const ScalarArray& shape,
+                const IntArray& shape,
                 const Scalar& val,
                 DataType dtype,
                 DenseTensor* out) {
@@ -58,7 +58,7 @@ struct GaussianGenerator {
 template <typename T, typename Context>
 void GaussianRandomKernel(const Context& dev_ctx,
-                          const ScalarArray& shape,
+                          const IntArray& shape,
                           float mean,
                           float std,
                           int seed,
@@ -243,8 +243,7 @@ void MultinomialKernel(const Context& dev_ctx,
         dev_ctx, rand, -1, true, false, 3 /*proto::VarType::INT64*/, out);
   } else {
     std::vector<int64_t> out_dim_vec = vectorize<int64_t>(out->dims());
-    DenseTensor value =
-        Empty<T, Context>(dev_ctx, ScalarArray(out_dim_vec));
+    DenseTensor value = Empty<T, Context>(dev_ctx, IntArray(out_dim_vec));
     TopkKernel<T, Context>(
         dev_ctx, rand, Scalar(num_samples), -1, true, true, &value, out);
   }
@@ -330,7 +330,7 @@ template <typename T, typename Context>
 void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& out_grad,
-                     const ScalarArray& paddings,
+                     const IntArray& paddings,
                      const std::string& mode,
                      float pad_value,
                      const std::string& data_format,
@@ -327,7 +327,7 @@ __global__ void Pad3DCircularNDHWC(const int nthreads,
 template <typename T, typename Context>
 void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const ScalarArray& paddings,
+                 const IntArray& paddings,
                  const std::string& mode,
                  float pad_value,
                  const std::string& data_format,
@@ -31,7 +31,7 @@ template <typename T, typename Context>
 void RandintRawKernel(const Context& dev_ctx,
                       int low,
                       int high,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       DataType dtype,
                       int seed,
                       DenseTensor* out) {
@@ -74,7 +74,7 @@ template <typename T, typename Context>
 void RandintKernel(const Context& dev_ctx,
                    int low,
                    int high,
-                   const ScalarArray& shape,
+                   const IntArray& shape,
                    DataType dtype,
                    DenseTensor* out) {
   RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
@@ -89,12 +89,12 @@ void RandpermRawKernel(
   RandintKernel<int, Context>(dev_ctx,
                               std::numeric_limits<int>::min(),
                               std::numeric_limits<int>::max(),
-                              ScalarArray({n}),
+                              IntArray({n}),
                               phi::DataType::INT32,
                               &key);
-  DenseTensor key_out = Empty<int, Context>(dev_ctx, ScalarArray({n}));
+  DenseTensor key_out = Empty<int, Context>(dev_ctx, IntArray({n}));
-  DenseTensor range = Empty<T, Context>(dev_ctx, ScalarArray({n}));
+  DenseTensor range = Empty<T, Context>(dev_ctx, IntArray({n}));
   T* range_data = range.data<T>();
   funcs::ForRange<Context> for_range(dev_ctx, n);
   for_range([range_data] __device__(size_t idx) {
@@ -26,7 +26,7 @@ template <typename T, typename Context>
 void RollGradKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& out_grad,
-                    const ScalarArray& shifts,
+                    const IntArray& shifts,
                     const std::vector<int64_t>& axis,
                     DenseTensor* x_grad) {
   auto* in_data = out_grad.data<T>();
@@ -26,7 +26,7 @@ using paddle::platform::PADDLE_CUDA_NUM_THREADS;
 template <typename T, typename Context>
 void RollKernel(const Context& dev_ctx,
                 const DenseTensor& x,
-                const ScalarArray& shifts,
+                const IntArray& shifts,
                 const std::vector<int64_t>& axis,
                 DenseTensor* out) {
   auto* in_data = x.data<T>();
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void SplitKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const ScalarArray& num_or_sections,
+                 const IntArray& num_or_sections,
                  const Scalar& axis_scalar,
                  std::vector<DenseTensor*> outs) {
   // need to infershape output
@@ -45,14 +45,14 @@ void TriangularSolveKernel(const Context& dev_ctx,
   int y_bst_ndim = y_bst_dims_vec.size();
   // Tensor broadcast to 'out' and temp 'x_bst'
-  ScalarArray x_bst_dims(x_bst_dims_vec);
+  IntArray x_bst_dims(x_bst_dims_vec);
   DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
   const T* x_bst_data = x_bst.data<T>();
   ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
   out->Resize(phi::make_ddim(y_bst_dims_vec));
   T* out_data = dev_ctx.template Alloc<T>(out);
-  ScalarArray y_bst_dims(y_bst_dims_vec);
+  IntArray y_bst_dims(y_bst_dims_vec);
   ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
   // calculate use cublas library
@@ -93,7 +93,7 @@ struct UniformGeneratorOffset {
 template <typename T, typename Context>
 void UniformRandomRawKernel(const Context& dev_ctx,
-                            const ScalarArray& shape,
+                            const IntArray& shape,
                             DataType dtype,
                             float min,
                             float max,
@@ -140,7 +140,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void UniformRandomKernel(const Context& dev_ctx,
-                         const ScalarArray& shape,
+                         const IntArray& shape,
                          DataType dtype,
                          float min,
                          float max,
@@ -46,8 +46,8 @@ void CholeskySolveGradKernel(const Context& dev_ctx,
   std::vector<int64_t> y_bst_dims_vec;
   std::tie(x_bst_dims_vec, y_bst_dims_vec) =
       funcs::MatrixGetBroadcastDims(x, y);
-  ScalarArray x_bst_dims(x_bst_dims_vec);
-  ScalarArray y_bst_dims(y_bst_dims_vec);
+  IntArray x_bst_dims(x_bst_dims_vec);
+  IntArray y_bst_dims(y_bst_dims_vec);
   // Tensor broadcast to temp 'y_bst'
   DenseTensor y_bst = phi::Empty<T, Context>(dev_ctx, y_bst_dims);
@@ -49,8 +49,8 @@ void CholeskySolveKernel(const Context& dev_ctx,
   std::vector<int64_t> y_bst_dims_vec;
   std::tie(x_bst_dims_vec, y_bst_dims_vec) =
       funcs::MatrixGetBroadcastDims(x, y);
-  ScalarArray x_bst_dims(x_bst_dims_vec);
-  ScalarArray y_bst_dims(y_bst_dims_vec);
+  IntArray x_bst_dims(x_bst_dims_vec);
+  IntArray y_bst_dims(y_bst_dims_vec);
   DenseTensor y_bst = phi::Empty<T, Context>(dev_ctx, y_bst_dims);
   ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, &y_bst);
@@ -79,8 +79,7 @@ void CholeskySolveKernel(const Context& dev_ctx,
   int N = static_cast<int>(x_bst_dims_vec[x_bst_ndim - 1]);
   int batchsize = product(phi::slice_ddim(x_bst.dims(), 0, x_bst_ndim - 2));
-  DenseTensor info =
-      phi::Empty<int, Context>(dev_ctx, ScalarArray({batchsize}));
+  DenseTensor info = phi::Empty<int, Context>(dev_ctx, IntArray({batchsize}));
   int* info_data = info.data<int>();
   CholeskySolveFunctor<T, Context> functor;
@@ -50,7 +50,7 @@ template <typename T, typename Context>
 void ExpandGradKernel(const Context& ctx,
                       const DenseTensor& x,
                       const DenseTensor& out_grad,
-                      const ScalarArray& shape,
+                      const IntArray& shape,
                       DenseTensor* in_grad) {
   auto expand_shape = shape.GetData();
   auto x_dims = x.dims();
@@ -27,7 +27,7 @@ using Tensor = DenseTensor;
 template <typename Context, typename T, int Rank>
 void Expand(const Context& ctx,
             const DenseTensor& x,
-            const ScalarArray& shape,
+            const IntArray& shape,
             DenseTensor* out) {
   auto in_dims = x.dims();
   auto expand_shape = shape.GetData();
@@ -107,7 +107,7 @@ void Expand(const Context& ctx,
 template <typename T, typename Context>
 void ExpandKernel(const Context& ctx,
                   const DenseTensor& x,
-                  const ScalarArray& shape,
+                  const IntArray& shape,
                   DenseTensor* out) {
   auto rank = x.dims().size();
   PADDLE_ENFORCE_GE(
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/phi/common/scalar_array.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/copy_kernel.h"
@@ -48,9 +48,9 @@ inline void GetOffsets(const DDim& big_dim,
 template <typename T, typename Context, size_t RANK>
 void SetValueGradImpl(const Context& dev_ctx,
                       const DenseTensor& out_grad,
-                      const ScalarArray& starts,
-                      const ScalarArray& ends,
-                      const ScalarArray& steps,
+                      const IntArray& starts,
+                      const IntArray& ends,
+                      const IntArray& steps,
                       const std::vector<int64_t>& axes,
                       const std::vector<int64_t>& decrease_axes,
                       const std::vector<int64_t>& none_axes,
@@ -249,9 +249,9 @@ void SetValueGradImpl(const Context& dev_ctx,
 template <typename T, typename Context>
 void SetValueGradKernel(const Context& dev_ctx,
                         const DenseTensor& out_grad,
-                        const ScalarArray& starts,
-                        const ScalarArray& ends,
-                        const ScalarArray& steps,
+                        const IntArray& starts,
+                        const IntArray& ends,
+                        const IntArray& steps,
                         const std::vector<int64_t>& axes,
                         const std::vector<int64_t>& decrease_axes,
                         const std::vector<int64_t>& none_axes,
@@ -14,8 +14,8 @@
 #pragma once
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/copy_kernel.h"
@@ -73,9 +73,9 @@ template <typename T, typename Context, size_t RANK>
 void SetValueImpl(const Context& dev_ctx,
                   const DenseTensor& in,
                   const DenseTensor& value,
-                  const ScalarArray& starts,
-                  const ScalarArray& ends,
-                  const ScalarArray& steps,
+                  const IntArray& starts,
+                  const IntArray& ends,
+                  const IntArray& steps,
                   const std::vector<int64_t>& axes,
                   const std::vector<int64_t>& decrease_axes,
                   const std::vector<int64_t>& none_axes,
@@ -134,9 +134,9 @@ void SetValueImpl(const Context& dev_ctx,
   Copy(dev_ctx, in, place, false, out);
   DenseTensor slice_tensor =
-      Empty<T>(dev_ctx, ScalarArray{slice_dims.Get(), slice_dims.size()});
+      Empty<T>(dev_ctx, IntArray{slice_dims.Get(), slice_dims.size()});
   DenseTensor pad_tensor =
-      Empty<T>(dev_ctx, ScalarArray{in_dims.Get(), in_dims.size()});
+      Empty<T>(dev_ctx, IntArray{in_dims.Get(), in_dims.size()});
   auto pad_e = EigenTensor<T, RANK>::From(pad_tensor, in_dims);
   auto out_e = EigenTensor<T, RANK>::From(*out);
@@ -211,9 +211,9 @@ template <typename T, typename Context>
 void SetTensorValueKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& value,
-                          const ScalarArray& starts,
-                          const ScalarArray& ends,
-                          const ScalarArray& steps,
+                          const IntArray& starts,
+                          const IntArray& ends,
+                          const IntArray& steps,
                           const std::vector<int64_t>& axes,
                           const std::vector<int64_t>& decrease_axes,
                           const std::vector<int64_t>& none_axes,
@@ -302,9 +302,9 @@ void SetTensorValueKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void SetValueKernel(const Context& dev_ctx,
                     const DenseTensor& x,
-                    const ScalarArray& starts,
-                    const ScalarArray& ends,
-                    const ScalarArray& steps,
+                    const IntArray& starts,
+                    const IntArray& ends,
+                    const IntArray& steps,
                     const std::vector<int64_t>& axes,
                     const std::vector<int64_t>& decrease_axes,
                     const std::vector<int64_t>& none_axes,
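The Empty<T>(dev_ctx, IntArray{dims.Get(), dims.size()}) calls above lean on the pointer-plus-length constructor to bridge a DDim into an IntArray; the same bridge in isolation (the ddim.h path is assumed):

    #include "paddle/phi/common/int_array.h"
    #include "paddle/phi/core/ddim.h"

    phi::IntArray ShapeOf(const phi::DDim& dims) {
      // DDim::Get() exposes the raw int64_t dims; size() gives their count.
      return phi::IntArray{dims.Get(), dims.size()};
    }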
@@ -274,8 +274,8 @@ void SliceGradRawKernel(const Context& ctx,
                         const DenseTensor& input,
                         const DenseTensor& out_grad,
                         const std::vector<int64_t>& axes,
-                        const ScalarArray& starts_arr,
-                        const ScalarArray& ends_arr,
+                        const IntArray& starts_arr,
+                        const IntArray& ends_arr,
                         const std::vector<int64_t>& infer_flags,
                         const std::vector<int64_t>& decrease_axis,
                         DenseTensor* input_grad) {
@@ -110,8 +110,8 @@ template <typename T, typename Context>
 void SliceRawKernel(const Context& ctx,
                     const DenseTensor& input,
                     const std::vector<int64_t>& axes,
-                    const ScalarArray& starts_arr,
-                    const ScalarArray& ends_arr,
+                    const IntArray& starts_arr,
+                    const IntArray& ends_arr,
                     const std::vector<int64_t>& infer_flags,
                     const std::vector<int64_t>& decrease_axis,
                     DenseTensor* out) {
@@ -24,9 +24,9 @@ void StridedSliceGradKernel(const Context& dev_ctx,
                             const DenseTensor& x,
                             const DenseTensor& out_grad,
                             const std::vector<int>& axes,
-                            const ScalarArray& starts,
-                            const ScalarArray& ends,
-                            const ScalarArray& strides,
+                            const IntArray& starts,
+                            const IntArray& ends,
+                            const IntArray& strides,
                             const std::vector<int>& infer_flags,
                             const std::vector<int>& decrease_axis,
                             DenseTensor* x_grad) {
@@ -62,9 +62,9 @@ void StridedSliceArrayGradKernel(
     const std::vector<const DenseTensor*>& x,
     const std::vector<const DenseTensor*>& out_grad,
     const std::vector<int>& axes,
-    const ScalarArray& starts,
-    const ScalarArray& ends,
-    const ScalarArray& strides,
+    const IntArray& starts,
+    const IntArray& ends,
+    const IntArray& strides,
     const std::vector<int>& infer_flags,
     const std::vector<int>& decrease_axis,
     std::vector<DenseTensor*> x_grad) {
@@ -23,9 +23,9 @@ template <typename T, typename Context>
 void StridedSliceKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const std::vector<int>& axes,
-                        const ScalarArray& starts,
-                        const ScalarArray& ends,
-                        const ScalarArray& strides,
+                        const IntArray& starts,
+                        const IntArray& ends,
+                        const IntArray& strides,
                         const std::vector<int>& infer_flags,
                         const std::vector<int>& decrease_axis,
                         DenseTensor* out) {
@@ -58,9 +58,9 @@ template <typename T, typename Context>
 void StridedSliceArrayKernel(const Context& dev_ctx,
                              const std::vector<const DenseTensor*>& x,
                              const std::vector<int>& axes,
-                             const ScalarArray& starts,
-                             const ScalarArray& ends,
-                             const ScalarArray& strides,
+                             const IntArray& starts,
+                             const IntArray& ends,
+                             const IntArray& strides,
                              const std::vector<int>& infer_flags,
                              const std::vector<int>& decrease_axis,
                              std::vector<DenseTensor*> out) {
......@@ -53,7 +53,7 @@ template <typename T, typename Context>
void TileGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& repeat_times,
const IntArray& repeat_times,
DenseTensor* x_grad) {
auto x_dims = x.dims();
auto vec_x_dims = phi::vectorize<int>(x_dims);
......
......@@ -85,7 +85,7 @@ void Tile(const Context& dev_ctx,
template <typename T, typename Context>
void TileKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& repeat_times,
const IntArray& repeat_times,
DenseTensor* out) {
auto rank = x.dims().size();
auto& repeat_times_data = repeat_times.GetData();
......
......@@ -44,7 +44,7 @@ void TriangularSolveGradKernel(const Context& dev_ctx,
std::tie(x_bst_dims_vec, y_bst_dims_vec) =
funcs::MatrixGetBroadcastDims(x, y);
ScalarArray y_bst_dims_array(y_bst_dims_vec);
IntArray y_bst_dims_array(y_bst_dims_vec);
DenseTensor dy_bst = phi::Empty<T, Context>(dev_ctx, y_bst_dims_array);
if (dy) {
// calculate x's conjugate for complex
......@@ -71,7 +71,7 @@ void TriangularSolveGradKernel(const Context& dev_ctx,
}
}
ScalarArray x_bst_dims_array(x_bst_dims_vec);
IntArray x_bst_dims_array(x_bst_dims_vec);
DenseTensor dx_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims_array);
if (dx) {
// calculate x's conjugate for complex
......
......@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context>
void UnsqueezeKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& axes,
const IntArray& axes,
DenseTensor* xshape,
DenseTensor* out) {
auto x_dims = x.dims();
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,7 +23,7 @@ template <typename T, typename Context>
void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& paddings,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& paddings,
const IntArray& paddings,
const std::string& mode,
float pad_value,
const std::string& data_format,
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,7 +23,7 @@ template <typename T, typename Context>
void RandintKernel(const Context& dev_ctx,
int low,
int high,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
DenseTensor* out);
......@@ -31,7 +31,7 @@ template <typename T, typename Context>
void RandintRawKernel(const Context& dev_ctx,
int low,
int high,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
int seed,
DenseTensor* out);
......
......@@ -24,7 +24,7 @@ namespace phi {
template <typename Context>
void ReshapeKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& shape,
const IntArray& shape,
DenseTensor* out) {
MetaTensor meta_out(out);
InferMetaFromVecValue(x, shape.GetData(), &meta_out);
......@@ -44,7 +44,7 @@ void ReshapeKernel(const Context& dev_ctx,
template <typename Context>
void ReshapeWithXShape(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& shape,
const IntArray& shape,
DenseTensor* out,
DenseTensor* xshape) {
ReshapeKernel(dev_ctx, x, shape, out);
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/kernels/empty_kernel.h"
......@@ -24,13 +24,13 @@ namespace phi {
template <typename Context>
void ReshapeKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& shape,
const IntArray& shape,
DenseTensor* out);
template <typename Context>
void ReshapeWithXShape(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& shape,
const IntArray& shape,
DenseTensor* out,
DenseTensor* xshape);
......@@ -41,7 +41,7 @@ DenseTensor Reshape(const Context& dev_ctx,
DenseTensor dense_out;
MetaTensor meta_out(&dense_out);
InferMetaFromVecValue(x, shape, &meta_out);
ReshapeKernel<Context>(dev_ctx, x, ScalarArray(shape), &dense_out);
ReshapeKernel<Context>(dev_ctx, x, IntArray(shape), &dense_out);
return dense_out;
}
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,7 +23,7 @@ template <typename T, typename Context>
void RollGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& shifts,
const IntArray& shifts,
const std::vector<int64_t>& axis,
DenseTensor* x_grad);
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void RollKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& shifts,
const IntArray& shifts,
const std::vector<int64_t>& axis,
DenseTensor* out);
......
......@@ -29,7 +29,7 @@ namespace sr {
template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
const Scalar& val,
DataType dtype,
SelectedRows* out) {
......
......@@ -14,8 +14,8 @@
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/selected_rows.h"
namespace phi {
......@@ -23,7 +23,7 @@ namespace sr {
template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
const Scalar& val,
DataType dtype,
SelectedRows* out);
......
......@@ -24,7 +24,7 @@ namespace sr {
template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......@@ -47,7 +47,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/selected_rows.h"
namespace phi {
......@@ -22,7 +22,7 @@ namespace sr {
template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -22,9 +22,9 @@ namespace phi {
template <typename T, typename Context>
void SetValueGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& steps,
const IntArray& starts,
const IntArray& ends,
const IntArray& steps,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& decrease_axes,
const std::vector<int64_t>& none_axes,
......
......@@ -14,8 +14,8 @@
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/infermeta/unary.h"
......@@ -25,9 +25,9 @@ template <typename T, typename Context>
void SetTensorValueKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& value,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& steps,
const IntArray& starts,
const IntArray& ends,
const IntArray& steps,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& decrease_axes,
const std::vector<int64_t>& none_axes,
......@@ -36,9 +36,9 @@ void SetTensorValueKernel(const Context& dev_ctx,
template <typename T, typename Context>
void SetValueKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& steps,
const IntArray& starts,
const IntArray& ends,
const IntArray& steps,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& decrease_axes,
const std::vector<int64_t>& none_axes,
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -24,8 +24,8 @@ void SliceGradRawKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad);
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,8 +23,8 @@ template <typename T, typename Context>
void SliceRawKernel(const Context& ctx,
const DenseTensor& input,
const std::vector<int64_t>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* out);
......
......@@ -16,8 +16,8 @@
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/kernels/empty_kernel.h"
......@@ -26,14 +26,14 @@ namespace phi {
template <typename T, typename Context>
void SplitKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& num_or_sections,
const IntArray& num_or_sections,
const Scalar& axis,
std::vector<DenseTensor*> out);
template <typename T, typename Context>
std::vector<DenseTensor> Split(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& num_or_sections,
const IntArray& num_or_sections,
const Scalar& axis) {
size_t out_number;
if (num_or_sections.GetData().size() == 1) {
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -24,9 +24,9 @@ void StridedSliceGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const std::vector<int>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& strides,
const IntArray& starts,
const IntArray& ends,
const IntArray& strides,
const std::vector<int>& infer_flags,
const std::vector<int>& decrease_axis,
DenseTensor* x_grad);
......@@ -37,9 +37,9 @@ void StridedSliceArrayGradKernel(
const std::vector<const DenseTensor*>& x,
const std::vector<const DenseTensor*>& out_grad,
const std::vector<int>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& strides,
const IntArray& starts,
const IntArray& ends,
const IntArray& strides,
const std::vector<int>& infer_flags,
const std::vector<int>& decrease_axis,
std::vector<DenseTensor*> x_grad);
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,9 +23,9 @@ template <typename T, typename Context>
void StridedSliceKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& strides,
const IntArray& starts,
const IntArray& ends,
const IntArray& strides,
const std::vector<int>& infer_flags,
const std::vector<int>& decrease_axis,
DenseTensor* out);
......@@ -34,9 +34,9 @@ template <typename T, typename Context>
void StridedSliceArrayKernel(const Context& dev_ctx,
const std::vector<const DenseTensor*>& x,
const std::vector<int>& axes,
const ScalarArray& starts,
const ScalarArray& ends,
const ScalarArray& strides,
const IntArray& starts,
const IntArray& ends,
const IntArray& strides,
const std::vector<int>& infer_flags,
const std::vector<int>& decrease_axis,
std::vector<DenseTensor*> out);
......
......@@ -22,7 +22,7 @@ namespace strings {
template <typename Context>
void EmptyKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
StringTensor* out) {
out->Resize(phi::make_ddim(shape.GetData()));
dev_ctx.template Alloc<dtype::pstring>(out);
......
......@@ -16,7 +16,7 @@
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/string_tensor.h"
#include "paddle/phi/infermeta/strings/nullary.h"
#include "paddle/phi/infermeta/strings/unary.h"
......@@ -26,7 +26,7 @@ namespace strings {
template <typename Context>
void EmptyKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
StringTensor* out);
template <typename Context>
......@@ -48,7 +48,7 @@ StringTensor Empty(const Context& dev_ctx) {
}
template <typename Context>
StringTensor Empty(const Context& dev_ctx, const ScalarArray& shape) {
StringTensor Empty(const Context& dev_ctx, const IntArray& shape) {
StringTensor string_out;
MetaTensor meta_out(&string_out);
phi::strings::CreateInferMeta(shape, &meta_out);
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#define MAX_RANK_SUPPORTED 6
......@@ -25,7 +25,7 @@ template <typename T, typename Context>
void TileGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
const ScalarArray& repeat_times,
const IntArray& repeat_times,
DenseTensor* x_grad);
} // namespace phi
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#define MAX_RANK_SUPPORTED 6
......@@ -24,7 +24,7 @@ namespace phi {
template <typename T, typename Context>
void TileKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& repeat_times,
const IntArray& repeat_times,
DenseTensor* out);
} // namespace phi
......@@ -17,7 +17,7 @@
#include <limits>
#include <random>
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
#include "paddle/phi/infermeta/nullary.h"
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
DataType dtype,
float min,
float max,
......
......@@ -15,7 +15,7 @@
#pragma once
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void UnsqueezeKernel(const Context& dev_ctx,
const DenseTensor& x,
const ScalarArray& axes,
const IntArray& axes,
DenseTensor* xshape,
DenseTensor* out);
} // namespace phi
......@@ -55,7 +55,7 @@ void FullValueXPU(const Context& dev_ctx, DenseTensor* tensor, VType val) {
template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
const ScalarArray& shape,
const IntArray& shape,
const Scalar& val,
DataType dtype,
DenseTensor* out) {
......
......@@ -20,8 +20,8 @@
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/unary.h"
......
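As a quick usage reference for the rename shown throughout this diff: every kernel that previously accepted a ScalarArray now accepts an IntArray, and call sites construct it the same way (initializer list, std::vector, or pointer plus size, as in the slice and reshape hunks above). The following is a minimal sketch, not part of the commit; the helper name IntArraySketch, the CPUContext choice, and the concrete shape values are illustrative assumptions, while phi::Empty<T, Context>(dev_ctx, const IntArray&) matches the signature used in the TriangularSolve hunk above.

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/empty_kernel.h"

// Hypothetical helper, for illustration only.
void IntArraySketch(const phi::CPUContext& dev_ctx) {
  // An IntArray can be built from an initializer list, a std::vector<int64_t>,
  // or a pointer + size -- the same forms ScalarArray accepted before the rename.
  phi::IntArray shape({2, 3});  // illustrative shape values
  // Allocate an uninitialized 2x3 float tensor; Context is deduced from dev_ctx.
  phi::DenseTensor t = phi::Empty<float>(dev_ctx, shape);
  (void)t;  // suppress unused-variable warnings in this sketch
}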