From 4e23ba325db40a212ed30165143bcb5301bd106c Mon Sep 17 00:00:00 2001 From: Aurelius84 Date: Fri, 21 Jan 2022 14:55:00 +0800 Subject: [PATCH] [PTen]Migrate Dim and DDim from paddle::framework into pten namespace (#39053) * Migrate Dim and DDim from paddle::framework into pten namespace * fix paddle::framework::Array * fix framework::Array --- paddle/fluid/framework/CMakeLists.txt | 8 - paddle/fluid/framework/ddim.h | 230 +--------------- paddle/fluid/framework/ddim_test.cc | 84 ------ paddle/fluid/framework/dim.h | 82 +----- .../amp/check_finite_and_unscale_op.h | 2 +- .../operators/amp/update_loss_scaling_op.h | 2 +- paddle/fluid/operators/bce_loss_op.cu | 2 +- paddle/fluid/operators/bernoulli_op.h | 2 +- paddle/fluid/operators/bilateral_slice_op.h | 2 +- paddle/fluid/operators/bincount_op.cu | 2 +- paddle/fluid/operators/deformable_conv_func.h | 2 +- paddle/fluid/operators/dequantize_log_op.cu | 2 +- .../fluid/operators/detection/box_clip_op.cu | 2 +- .../detection/sigmoid_focal_loss_op.cu | 2 +- .../fluid/operators/detection/yolo_box_op.h | 2 +- paddle/fluid/operators/distribution_helper.h | 2 +- .../elementwise/elementwise_functor.h | 34 +-- paddle/fluid/operators/fake_quantize_op.h | 2 +- paddle/fluid/operators/grid_sampler_op.h | 2 +- paddle/fluid/operators/histogram_op.cu | 2 +- paddle/fluid/operators/huber_loss_op.h | 2 +- paddle/fluid/operators/interpolate_op.h | 2 +- paddle/fluid/operators/interpolate_v2_op.h | 2 +- .../kernel_primitives/datamover_primitives.h | 3 +- paddle/fluid/operators/kldiv_loss_op.h | 2 +- paddle/fluid/operators/lstm_unit_op.cu | 2 +- paddle/fluid/operators/math.h | 2 +- paddle/fluid/operators/math/algorithm.h | 2 +- .../fluid/operators/math/complex_functors.h | 2 +- paddle/fluid/operators/math/cos_sim_functor.h | 2 +- paddle/fluid/operators/math/cross_entropy.h | 2 +- paddle/fluid/operators/math/depthwise_conv.h | 2 +- .../math/detail/activation_functions.h | 2 +- .../fluid/operators/math/detail/gru_kernel.h | 2 +- .../fluid/operators/math/detail/lstm_kernel.h | 2 +- paddle/fluid/operators/math/maxouting.h | 2 +- paddle/fluid/operators/math/pooling.h | 2 +- .../fluid/operators/modified_huber_loss_op.cu | 2 +- .../fluid/operators/modified_huber_loss_op.h | 2 +- paddle/fluid/operators/multinomial_op.h | 2 +- paddle/fluid/operators/nll_loss_op.cu | 2 +- paddle/fluid/operators/roll_op.cu | 14 +- .../sigmoid_cross_entropy_with_logits_op.cu | 2 +- paddle/fluid/operators/smooth_l1_loss_op.h | 2 +- paddle/fluid/operators/unstack_op.h | 1 - paddle/fluid/platform/aligned_vector.h | 2 +- paddle/fluid/platform/eigen_ext.h | 2 +- paddle/fluid/platform/transform.h | 2 +- paddle/fluid/platform/transform_test.cu | 2 +- paddle/pten/api/include/tensor.h | 10 +- paddle/pten/api/lib/tensor.cc | 6 +- paddle/pten/core/CMakeLists.txt | 9 + paddle/{fluid/framework => pten/core}/array.h | 10 +- paddle/{fluid/framework => pten/core}/ddim.cc | 85 +++--- paddle/pten/core/ddim.h | 257 ++++++++++++++++++ paddle/pten/core/ddim_test.cc | 83 ++++++ paddle/pten/core/dim.h | 100 +++++++ .../framework => pten/core}/dim_test.cu | 41 +-- .../platform => pten/core}/hostdevice.h | 5 +- paddle/pten/core/tensor_base.h | 4 +- paddle/pten/core/tensor_meta.h | 4 +- .../core}/unroll_array_ops.h | 8 +- .../core}/unroll_array_ops_test.cc | 8 +- paddle/pten/infermeta/binary.cc | 8 +- paddle/pten/infermeta/nullary.cc | 4 +- paddle/pten/infermeta/unary.cc | 28 +- paddle/pten/kernels/cpu/elementwise.h | 4 +- paddle/pten/kernels/cpu/reduce.h | 4 +- paddle/pten/kernels/empty_kernel.cc | 2 +- 
paddle/pten/kernels/flatten_grad_kernel.cc | 3 +- paddle/pten/kernels/funcs/common_shape.h | 2 +- paddle/pten/kernels/funcs/elementwise_base.h | 26 +- .../pten/kernels/funcs/elementwise_functor.h | 2 +- paddle/pten/kernels/funcs/transpose.cc | 6 +- paddle/pten/kernels/funcs/transpose.cu | 6 +- paddle/pten/kernels/funcs/transpose.h | 2 +- paddle/pten/kernels/gpu/elementwise.h | 37 ++- paddle/pten/kernels/gpu/reduce.h | 20 +- .../pten/kernels/impl/dot_grad_kernel_impl.h | 4 +- paddle/pten/kernels/impl/full_kernel_impl.h | 2 +- .../kernels/impl/matmul_grad_kernel_impl.h | 4 +- paddle/pten/kernels/impl/matmul_kernel_impl.h | 10 +- paddle/pten/tests/api/test_cast_api.cc | 2 +- paddle/pten/tests/api/test_conj_api.cc | 2 +- paddle/pten/tests/api/test_dot_api.cc | 2 +- paddle/pten/tests/api/test_elementwise_api.cc | 2 +- paddle/pten/tests/api/test_empty_api.cc | 2 +- paddle/pten/tests/api/test_fill_api.cc | 2 +- paddle/pten/tests/api/test_flatten_api.cc | 2 +- paddle/pten/tests/api/test_matmul_api.cc | 2 +- paddle/pten/tests/api/test_mean_api.cc | 2 +- paddle/pten/tests/api/test_reshape_api.cc | 2 +- paddle/pten/tests/api/test_scale_api.cc | 2 +- paddle/pten/tests/api/test_sum_api.cc | 2 +- paddle/pten/tests/api/test_to_api.cc | 2 +- .../pten/tests/kernels/test_cast_dev_api.cc | 2 +- .../pten/tests/kernels/test_conj_dev_api.cc | 2 +- .../pten/tests/kernels/test_copy_dev_api.cc | 2 +- .../tests/kernels/test_creation_dev_api.cc | 2 +- paddle/pten/tests/kernels/test_dot_dev_api.cc | 2 +- .../tests/kernels/test_elementwise_dev_api.cc | 2 +- .../tests/kernels/test_flatten_dev_api.cc | 2 +- .../pten/tests/kernels/test_matmul_dev_api.cc | 2 +- .../pten/tests/kernels/test_mean_dev_api.cc | 2 +- .../tests/kernels/test_reshape_dev_api.cc | 2 +- .../pten/tests/kernels/test_scale_dev_api.cc | 2 +- paddle/pten/tests/kernels/test_sum_dev_api.cc | 2 +- 107 files changed, 734 insertions(+), 658 deletions(-) delete mode 100644 paddle/fluid/framework/ddim_test.cc rename paddle/{fluid/framework => pten/core}/array.h (94%) rename paddle/{fluid/framework => pten/core}/ddim.cc (77%) create mode 100644 paddle/pten/core/ddim.h create mode 100644 paddle/pten/core/ddim_test.cc create mode 100644 paddle/pten/core/dim.h rename paddle/{fluid/framework => pten/core}/dim_test.cu (62%) rename paddle/{fluid/platform => pten/core}/hostdevice.h (89%) rename paddle/{fluid/framework => pten/core}/unroll_array_ops.h (96%) rename paddle/{fluid/framework => pten/core}/unroll_array_ops_test.cc (92%) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 902943d14f..83e5c1c179 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -55,14 +55,6 @@ proto_library(trainer_desc_proto SRCS trainer_desc.proto DEPS framework_proto cc_library(string_array SRCS string_array.cc DEPS utf8proc) -cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce) -cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) -if(WITH_GPU) - nv_test(dim_test SRCS dim_test.cu DEPS ddim) -elseif(WITH_ROCM) - hip_test(dim_test SRCS dim_test.cu DEPS ddim) -endif() -cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc) cc_library(data_type SRCS data_type.cc DEPS framework_proto ddim device_context) cc_test(data_type_test SRCS data_type_test.cc DEPS data_type place tensor) if(WITH_GPU) diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h index 565e0b430d..d150cca9d4 100644 --- a/paddle/fluid/framework/ddim.h +++ b/paddle/fluid/framework/ddim.h @@ -14,237 +14,13 @@ 
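The hunk below strips paddle/fluid/framework/ddim.h down to a compatibility shim: the implementation moves to paddle/pten/core/ddim.h, and the old paddle::framework spellings are preserved through aliases so the 107 touched files (and any out-of-tree callers) keep compiling. A minimal sketch of the resulting shim header, assuming only what this diff shows:

// Sketch of the compatibility-shim pattern this patch applies to ddim.h
// (and, further down, to dim.h). Mirrors the "+" lines of the hunk below.
#include "paddle/pten/core/ddim.h"

namespace paddle {
namespace framework {
// The old spelling paddle::framework::DDim still resolves via this alias.
using DDim = pten::framework::DDim;
// Free functions (make_ddim, vectorize, slice_ddim, ...) are re-exported.
using namespace pten::framework;  // NOLINT
}  // namespace framework
}  // namespace paddle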
limitations under the License. */ #pragma once -#include -#include -#include -#include - -#include "paddle/fluid/framework/dim.h" +#include "paddle/pten/core/ddim.h" namespace paddle { namespace framework { -#define PADDLE_VISIT_DDIM_BASE(rank, callback) \ - case (rank): { \ - constexpr auto kRank = (rank); \ - return (callback); \ - } - -#define PADDLE_VISIT_DDIM(rank, callback) \ - switch (rank) { \ - PADDLE_VISIT_DDIM_BASE(0, callback); \ - PADDLE_VISIT_DDIM_BASE(1, callback); \ - PADDLE_VISIT_DDIM_BASE(2, callback); \ - PADDLE_VISIT_DDIM_BASE(3, callback); \ - PADDLE_VISIT_DDIM_BASE(4, callback); \ - PADDLE_VISIT_DDIM_BASE(5, callback); \ - PADDLE_VISIT_DDIM_BASE(6, callback); \ - PADDLE_VISIT_DDIM_BASE(7, callback); \ - PADDLE_VISIT_DDIM_BASE(8, callback); \ - PADDLE_VISIT_DDIM_BASE(9, callback); \ - default: \ - PADDLE_THROW(platform::errors::Unimplemented( \ - "Invalid dimension to be accessed. Now only supports access to " \ - "dimension 0 to 9, but received dimension is %d.", \ - rank)); \ - } - -template -inline void dynamic_dim_assign(const T1* in, T2* out, int n) { - PADDLE_VISIT_DDIM(n, (static_dim_assign(in, out))); -} - -/** - * \brief A dynamically sized dimension. - * - * The number of dimensions must be between [1, 9]. - */ -class DDim { - public: - constexpr static int kMaxRank = 9; - - DDim() : rank_(1) { dim_[0] = 0; } - - DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); } - - DDim(const int* d, int n) : rank_(n) { - dynamic_dim_assign(d, dim_.GetMutable(), n); - } - - DDim(const int64_t* d, int n) : rank_(n) { - dynamic_dim_assign(d, dim_.GetMutable(), n); - } - - template - /*implicit*/ DDim(const Dim& in) : rank_(D) { // NOLINT - UnsafeCast() = in; - } - - /*implicit*/ DDim(std::initializer_list init_list) - : DDim(init_list.begin(), init_list.size()) {} - - inline DDim& operator=(const DDim& ddim) { return CopyFrom(ddim); } - - template - inline DDim& operator=(const Dim& dim) { - rank_ = D; - UnsafeCast() = dim; - return *this; - } - - inline int64_t& operator[](int idx) { return dim_[idx]; } - - inline int64_t operator[](int idx) const { return dim_[idx]; } - - int64_t& at(int idx) { - PADDLE_ENFORCE_GE(idx, 0, - platform::errors::InvalidArgument( - "Invalid DDim index to be accessed. The valid index " - "is between 0 and %d, but received index is %d.", - rank_, idx)); - PADDLE_ENFORCE_LT(idx, rank_, - platform::errors::InvalidArgument( - "Invalid DDim index to be accessed. The valid index " - "is between 0 and %d, but received index is %d.", - rank_, idx)); - return dim_[idx]; - } - - int64_t at(int idx) const { - PADDLE_ENFORCE_GE(idx, 0, - platform::errors::InvalidArgument( - "Invalid DDim index to be accessed. The valid index " - "is between 0 and %d, but received index is %d.", - rank_, idx)); - PADDLE_ENFORCE_LT(idx, rank_, - platform::errors::InvalidArgument( - "Invalid DDim index to be accessed. 
The valid index " - "is between 0 and %d, but received index is %d.", - rank_, idx)); - return dim_[idx]; - } - - template - typename std::result_of&)>::type apply_visitor( - Visitor&& visitor) { - PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast())); - } - - template - typename std::result_of&)>::type apply_visitor( - Visitor&& visitor) const { - PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast())); - } - - bool operator==(const DDim& d) const; - - bool operator!=(const DDim& d) const; - - inline const int64_t* Get() const { return dim_.Get(); } - - inline int64_t* GetMutable() { return dim_.GetMutable(); } - - inline int size() const { return rank_; } - - std::string to_str() const; - - DDim reshape(const std::vector& shape) const; - - DDim transpose(const std::vector& axis) const; - - private: - template - inline Dim& UnsafeCast() { - static_assert(D >= 0 && D <= kMaxRank, "Invalid rank"); - auto* p = static_cast(&dim_); - return *reinterpret_cast*>(p); - } - - template - inline const Dim& UnsafeCast() const { - static_assert(D >= 0 && D <= kMaxRank, "Invalid rank"); - auto* p = static_cast(&dim_); - return *reinterpret_cast*>(p); - } - - inline DDim& CopyFrom(const DDim& ddim) { - PADDLE_VISIT_DDIM(ddim.rank_, (*this = ddim.UnsafeCast())); - } - - friend DDim stride(const DDim& ddim); - friend DDim stride_numel(const DDim& ddim); - - private: - Dim dim_; - int rank_; -}; - -#undef PADDLE_VISIT_DDIM_BASE -#undef PADDLE_VISIT_DDIM - -/** - * \brief Make a DDim from std::vector - * - * \param dims An vector of ints. Must be sized between [1, 9] - */ -DDim make_ddim(const std::vector& dims); - -DDim make_ddim(const std::vector& dims); - -/** - * \brief Make a DDim from an initializer list - * - * \param dims An initializer list of ints. Must be sized between [1, 9] - * - */ -DDim make_ddim(std::initializer_list dims); - -template -std::vector vectorize(const DDim& ddim) { - std::vector result(DDim::kMaxRank); - dynamic_dim_assign(ddim.Get(), result.data(), ddim.size()); - result.resize(ddim.size()); - return result; -} - -int64_t product(const DDim& ddim); - -bool contain_unknown_dim(const DDim& ddim); - -/** - * \brief Slice a ddim - * - * Slice dim with [begin, end). - * e.g. DDim d = make_ddim({1,2,3,4,5}); - * slice_ddim(d, 1, 3); ====> {2,3} - */ -DDim slice_ddim(const DDim& dim, int begin, int end); - -/** - * \brief What is the length of this dimension? - * - * \param Dynamic dimension to inspect - */ - -int arity(const DDim& ddim); - -std::ostream& operator<<(std::ostream&, const DDim&); - -/** -* \brief Flatten dim to 3d -* e.g., DDim d = mak_ddim({1, 2, 3, 4, 5, 6}) -* flatten_to_3d(d, 2, 4); ===> {1*2, 3*4, 5*6} ===> {2, 12, 30} -*/ -DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims); - -// Reshape a tensor to a matrix. The matrix's first dimension(column length) -// will be the product of tensor's first `num_col_dims` dimensions. -DDim flatten_to_2d(const DDim& src, int num_col_dims); - -DDim flatten_to_1d(const DDim& src); - -DDim stride(const DDim& ddim); +using DDim = pten::framework::DDim; +using namespace pten::framework; // NOLINT -DDim stride_numel(const DDim& ddim); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ddim_test.cc b/paddle/fluid/framework/ddim_test.cc deleted file mode 100644 index e89f77ae49..0000000000 --- a/paddle/fluid/framework/ddim_test.cc +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#include - -#include "gtest/gtest.h" -#include "paddle/fluid/framework/ddim.h" - -TEST(DDim, Equality) { - // construct a DDim from an initialization list - paddle::framework::DDim ddim = paddle::framework::make_ddim({9, 1, 5}); - EXPECT_EQ(ddim[0], 9); - EXPECT_EQ(ddim[1], 1); - EXPECT_EQ(ddim[2], 5); - - // construct a DDim from a vector - std::vector vec({9, 1, 5}); - paddle::framework::DDim vddim = paddle::framework::make_ddim(vec); - EXPECT_EQ(ddim[0], 9); - EXPECT_EQ(ddim[1], 1); - EXPECT_EQ(ddim[2], 5); - - // mutate a DDim - ddim[1] = 2; - EXPECT_EQ(ddim[1], 2); - ddim[0] = 6; - EXPECT_EQ(ddim[0], 6); - - // vectorize a DDim - std::vector res_vec = paddle::framework::vectorize(vddim); - EXPECT_EQ(res_vec[0], 9); - EXPECT_EQ(res_vec[1], 1); - EXPECT_EQ(res_vec[2], 5); - paddle::framework::Dim<3> d(3, 2, 1); - res_vec = paddle::framework::vectorize(paddle::framework::DDim(d)); - EXPECT_EQ(res_vec[0], 3); - EXPECT_EQ(res_vec[1], 2); - EXPECT_EQ(res_vec[2], 1); - - // arity of a DDim - EXPECT_EQ(paddle::framework::arity(ddim), 3); - EXPECT_EQ(ddim.size(), 3); - - // product of a DDim - EXPECT_EQ(paddle::framework::product(vddim), 45); - EXPECT_EQ( - paddle::framework::product(paddle::framework::make_ddim({3, 2, 5, 3})), - 90); - - // slice a DDim - paddle::framework::DDim ddim2 = - paddle::framework::make_ddim({1, 2, 3, 4, 5, 6}); - paddle::framework::DDim ss = paddle::framework::slice_ddim(ddim2, 2, 5); - EXPECT_EQ(arity(ss), 3); - EXPECT_EQ(ss[0], 3); - EXPECT_EQ(ss[1], 4); - EXPECT_EQ(ss[2], 5); - paddle::framework::DDim ss2 = paddle::framework::slice_ddim(ddim2, 0, 6); - EXPECT_EQ(arity(ss2), 6); - EXPECT_EQ(ss2[0], 1); - EXPECT_EQ(ss2[1], 2); - EXPECT_EQ(ss2[2], 3); - EXPECT_EQ(ss2[3], 4); - EXPECT_EQ(ss2[4], 5); - EXPECT_EQ(ss2[5], 6); -} - -TEST(DDim, Print) { - // print a DDim - std::stringstream ss; - paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 3, 4}); - ss << ddim; - EXPECT_EQ("2, 3, 4", ss.str()); -} diff --git a/paddle/fluid/framework/dim.h b/paddle/fluid/framework/dim.h index 66214b265f..6abae4e731 100644 --- a/paddle/fluid/framework/dim.h +++ b/paddle/fluid/framework/dim.h @@ -12,89 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. #pragma once - -#include -#include -#include -#include -#include - -#include "paddle/fluid/framework/array.h" -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/dim.h" namespace paddle { namespace framework { - -// Statically sized, statically indexed dimension template -class Dim : public Array { - public: - static_assert(D >= 0, "D must be not less than 0"); - - static constexpr int kRank = D; - using BaseClass = Array; - - inline Dim(int64_t head, const Dim& tail) { - (*this)[0] = head; - new (this->GetMutable() + 1) Dim(tail); - } - - template - HOSTDEVICE explicit Dim(int64_t head, Args... args) - : BaseClass(head, args...) 
{} - - /** Construct a Dim with each dimension set to the given index */ - HOSTDEVICE explicit Dim(int64_t idx) { this->Fill(idx); } - - HOSTDEVICE Dim() = default; - - HOST std::string to_string() const; -}; - -// Product of a Dim -template -HOSTDEVICE inline int64_t product(const Dim& a) { - return UnrollProduct::Run(a.Get()); -} - -/** - * Helper function to create a Dim - * - * \param idxes The type of Dim constructed depends on the number of params - * - */ - -template -HOSTDEVICE inline Dim make_dim(Args... idxes) { - return Dim(idxes...); -} - -// Allows us to output a Dim -template -inline std::ostream& operator<<(std::ostream& os, const Dim& d) { - os << d[0]; - for (int i = 1; i < D; ++i) { - os << ", " << d[i]; - } - return os; -} - -inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) { - return os; -} - -template -HOST std::string Dim::to_string() const { - std::stringstream stream; - stream << *this; - return stream.str(); -} - -template -inline void static_dim_assign(const T1* in, T2* out) { - UnrollAssign::Run(in, out); -} +using Dim = pten::framework::Dim; +using namespace pten::framework; // NOLINT } // namespace framework } // namespace paddle diff --git a/paddle/fluid/operators/amp/check_finite_and_unscale_op.h b/paddle/fluid/operators/amp/check_finite_and_unscale_op.h index 29b96c4a67..49ca2c3862 100644 --- a/paddle/fluid/operators/amp/check_finite_and_unscale_op.h +++ b/paddle/fluid/operators/amp/check_finite_and_unscale_op.h @@ -18,7 +18,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/isfinite_op.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/amp/update_loss_scaling_op.h b/paddle/fluid/operators/amp/update_loss_scaling_op.h index decc3c3b92..2c953d4eee 100644 --- a/paddle/fluid/operators/amp/update_loss_scaling_op.h +++ b/paddle/fluid/operators/amp/update_loss_scaling_op.h @@ -24,7 +24,7 @@ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/errors.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bce_loss_op.cu b/paddle/fluid/operators/bce_loss_op.cu index d493dad132..6595d6decc 100644 --- a/paddle/fluid/operators/bce_loss_op.cu +++ b/paddle/fluid/operators/bce_loss_op.cu @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bernoulli_op.h b/paddle/fluid/operators/bernoulli_op.h index 40f285d11f..da66742e08 100644 --- a/paddle/fluid/operators/bernoulli_op.h +++ b/paddle/fluid/operators/bernoulli_op.h @@ -14,7 +14,7 @@ limitations under the License. 
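The dim.h change above follows the same shim pattern: paddle::framework::Dim<D> becomes an alias for pten::framework::Dim<D>, so existing call sites are untouched. A short usage sketch, assuming the moved API matches the deleted fluid header shown above:

// Sketch only; exercises the Dim<D> API from the deleted dim.h above.
#include <cstdint>
#include "paddle/pten/core/dim.h"

void dim_alias_example() {
  // Statically sized, statically indexed dimension; rank fixed at compile time.
  pten::framework::Dim<3> d(3, 2, 1);
  int64_t n = pten::framework::product(d);    // 3 * 2 * 1 == 6
  auto d2 = pten::framework::make_dim(4, 5);  // deduces Dim<2>
  // paddle::framework::Dim<3> still works through the alias above.
  (void)n;
  (void)d2;
}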
*/ #pragma once #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bilateral_slice_op.h b/paddle/fluid/operators/bilateral_slice_op.h index 0903fe4c71..3ef13c421c 100644 --- a/paddle/fluid/operators/bilateral_slice_op.h +++ b/paddle/fluid/operators/bilateral_slice_op.h @@ -13,7 +13,7 @@ #include #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bincount_op.cu b/paddle/fluid/operators/bincount_op.cu index cf189193d1..5964b9e345 100644 --- a/paddle/fluid/operators/bincount_op.cu +++ b/paddle/fluid/operators/bincount_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/bincount_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/deformable_conv_func.h b/paddle/fluid/operators/deformable_conv_func.h index ba1c504430..99d1d7c477 100644 --- a/paddle/fluid/operators/deformable_conv_func.h +++ b/paddle/fluid/operators/deformable_conv_func.h @@ -24,7 +24,7 @@ #pragma once #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" template HOSTDEVICE T DmcnGetGradientWeight(T argmax_h, T argmax_w, const int h, diff --git a/paddle/fluid/operators/dequantize_log_op.cu b/paddle/fluid/operators/dequantize_log_op.cu index 39f4fdb71b..821b87bf05 100644 --- a/paddle/fluid/operators/dequantize_log_op.cu +++ b/paddle/fluid/operators/dequantize_log_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/dequantize_log_op.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/box_clip_op.cu b/paddle/fluid/operators/detection/box_clip_op.cu index 17013efcc9..53727d9d08 100644 --- a/paddle/fluid/operators/detection/box_clip_op.cu +++ b/paddle/fluid/operators/detection/box_clip_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/detection/box_clip_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu index 10c402e5a4..7102c4cffe 100644 --- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu +++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection/yolo_box_op.h b/paddle/fluid/operators/detection/yolo_box_op.h index e06c81052a..31a67ecc26 100644 --- a/paddle/fluid/operators/detection/yolo_box_op.h +++ b/paddle/fluid/operators/detection/yolo_box_op.h @@ -14,7 +14,7 @@ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/distribution_helper.h b/paddle/fluid/operators/distribution_helper.h index 8bb963979e..a13ae57090 100644 --- a/paddle/fluid/operators/distribution_helper.h +++ b/paddle/fluid/operators/distribution_helper.h @@ -26,7 +26,7 @@ limitations under the License. */ #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/for_range.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" #if !defined(_WIN32) #define UNLIKELY(condition) __builtin_expect(static_cast(condition), 0) diff --git a/paddle/fluid/operators/elementwise/elementwise_functor.h b/paddle/fluid/operators/elementwise/elementwise_functor.h index 8a6cadc241..daca105ce4 100644 --- a/paddle/fluid/operators/elementwise/elementwise_functor.h +++ b/paddle/fluid/operators/elementwise/elementwise_functor.h @@ -14,8 +14,8 @@ limitations under the License. 
*/ #pragma once -#include "paddle/fluid/framework/array.h" #include "paddle/fluid/platform/complex.h" +#include "paddle/pten/core/array.h" #include "paddle/pten/kernels/funcs/elementwise_functor.h" namespace paddle { @@ -92,12 +92,12 @@ using Complex = paddle::platform::complex; template struct DivGradXYFunctor { - inline HOSTDEVICE paddle::framework::Array operator()(const InT a, - const InT b, - const InT c) { + inline HOSTDEVICE pten::framework::Array operator()(const InT a, + const InT b, + const InT c) { // dx = dout / y // dy = - dout * out / y - paddle::framework::Array outs; + pten::framework::Array outs; outs[0] = a / c; outs[1] = -a * b / c; return outs; @@ -106,9 +106,9 @@ struct DivGradXYFunctor { template struct DivGradXYFunctor, Complex> { - inline HOSTDEVICE paddle::framework::Array, 2> operator()( + inline HOSTDEVICE pten::framework::Array, 2> operator()( const Complex a, const Complex b, const Complex c) { - paddle::framework::Array, 2> outs; + pten::framework::Array, 2> outs; Complex c_conj(c.real, -c.imag); Complex out_div_c_conj((b / c).real, -(b / c).imag); outs[0] = a / c_conj; @@ -247,9 +247,9 @@ struct MinGradYFunctor { template struct MinGradXYFunctor { - inline HOSTDEVICE paddle::framework::Array operator()( + inline HOSTDEVICE pten::framework::Array operator()( const InT& x, const InT& y, const InT& dout) { - paddle::framework::Array outs; + pten::framework::Array outs; // dx = dout * (x < y) outs[0] = static_cast(dout * static_cast(x < y)); // dy = dout * (x >= y) @@ -273,10 +273,10 @@ struct MulGradFunctor> { template struct MulGradXYFunctor { - inline HOSTDEVICE paddle::framework::Array operator()(const InT a, - const InT b, - const InT c) { - paddle::framework::Array outs; + inline HOSTDEVICE pten::framework::Array operator()(const InT a, + const InT b, + const InT c) { + pten::framework::Array outs; // dx = dout * y outs[0] = a * b; // dy = dout * x @@ -287,9 +287,9 @@ struct MulGradXYFunctor { template struct MulGradXYFunctor, Complex> { - inline HOSTDEVICE paddle::framework::Array, 2> operator()( + inline HOSTDEVICE pten::framework::Array, 2> operator()( const Complex a, const Complex b, const Complex c) { - paddle::framework::Array, 2> outs; + pten::framework::Array, 2> outs; // dx = dout * y Complex b_conj(b.real, -b.imag); outs[0] = a * b_conj; @@ -316,9 +316,9 @@ struct MaxGradYFunctor { template struct MaxGradXYFunctor { - inline HOSTDEVICE paddle::framework::Array operator()( + inline HOSTDEVICE pten::framework::Array operator()( const InT& x, const InT& y, const InT& dout) { - paddle::framework::Array outs; + pten::framework::Array outs; // dx = dout * (x > y) outs[0] = static_cast(dout * static_cast(x > y)); // dy = dout * (x <= y) diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h index 21e7079ff6..c31139611e 100644 --- a/paddle/fluid/operators/fake_quantize_op.h +++ b/paddle/fluid/operators/fake_quantize_op.h @@ -20,8 +20,8 @@ limitations under the License. 
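The elementwise functor edits above are type-only: functors returning two gradients now spell the fixed-size array as pten::framework::Array<T, 2> instead of paddle::framework::Array<T, 2>, with identical semantics. A hedged sketch of the call pattern (MulGradXYFunctorSketch is a hypothetical stand-in, not a name from the patch):

// Hypothetical two-output gradient functor in the style of MulGradXYFunctor.
#include "paddle/pten/core/array.h"

template <typename T>
struct MulGradXYFunctorSketch {
  // For out = x * y: dx = dout * y, dy = dout * x.
  pten::framework::Array<T, 2> operator()(T dout, T x, T y) const {
    pten::framework::Array<T, 2> outs;
    outs[0] = dout * y;  // dx
    outs[1] = dout * x;  // dy
    return outs;
  }
};

void mul_grad_example() {
  MulGradXYFunctorSketch<float> f;
  auto grads = f(/*dout=*/1.0f, /*x=*/2.0f, /*y=*/3.0f);
  // grads[0] == 3.0f (dx), grads[1] == 2.0f (dy)
  (void)grads;
}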
*/ #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/transform.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h index da386052c7..a595e5078b 100644 --- a/paddle/fluid/operators/grid_sampler_op.h +++ b/paddle/fluid/operators/grid_sampler_op.h @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/gather.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/histogram_op.cu b/paddle/fluid/operators/histogram_op.cu index 2bf259f7d7..a34f4b8a22 100644 --- a/paddle/fluid/operators/histogram_op.cu +++ b/paddle/fluid/operators/histogram_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/histogram_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h index 93cfba1964..fbfed71e1e 100644 --- a/paddle/fluid/operators/huber_loss_op.h +++ b/paddle/fluid/operators/huber_loss_op.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/interpolate_op.h b/paddle/fluid/operators/interpolate_op.h index baa292319d..0c0dde6bd4 100644 --- a/paddle/fluid/operators/interpolate_op.h +++ b/paddle/fluid/operators/interpolate_op.h @@ -15,7 +15,7 @@ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/interpolate_v2_op.h b/paddle/fluid/operators/interpolate_v2_op.h index a5afb18b3f..4d6189b57b 100644 --- a/paddle/fluid/operators/interpolate_v2_op.h +++ b/paddle/fluid/operators/interpolate_v2_op.h @@ -15,7 +15,7 @@ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/kernel_primitives/datamover_primitives.h b/paddle/fluid/operators/kernel_primitives/datamover_primitives.h index ce45ed0301..45697073cb 100644 --- a/paddle/fluid/operators/kernel_primitives/datamover_primitives.h +++ b/paddle/fluid/operators/kernel_primitives/datamover_primitives.h @@ -20,6 +20,7 @@ #ifdef PADDLE_WITH_HIP #include #endif +#include "paddle/pten/core/ddim.h" namespace paddle { namespace operators { @@ -85,7 +86,7 @@ struct FastDivMod { template struct BroadcastConfig { FastDivMod divmoders[kDims]; - uint32_t strides[framework::DDim::kMaxRank]; + uint32_t 
strides[pten::framework::DDim::kMaxRank]; HOSTDEVICE BroadcastConfig() {} HOSTDEVICE BroadcastConfig(const std::vector& out_dims, diff --git a/paddle/fluid/operators/kldiv_loss_op.h b/paddle/fluid/operators/kldiv_loss_op.h index 0bc53d7dd7..40199677fe 100644 --- a/paddle/fluid/operators/kldiv_loss_op.h +++ b/paddle/fluid/operators/kldiv_loss_op.h @@ -13,7 +13,7 @@ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu index 3949a066e0..b758efb065 100644 --- a/paddle/fluid/operators/lstm_unit_op.cu +++ b/paddle/fluid/operators/lstm_unit_op.cu @@ -19,7 +19,7 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.c #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/cross_entropy_op.h" #include "paddle/fluid/operators/lstm_unit_op.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math.h b/paddle/fluid/operators/math.h index 3b28928a52..f5ce5af70b 100644 --- a/paddle/fluid/operators/math.h +++ b/paddle/fluid/operators/math.h @@ -15,7 +15,7 @@ #pragma once #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" #include "math.h" // NOLINT diff --git a/paddle/fluid/operators/math/algorithm.h b/paddle/fluid/operators/math/algorithm.h index 346c693a22..cbe1a03d90 100644 --- a/paddle/fluid/operators/math/algorithm.h +++ b/paddle/fluid/operators/math/algorithm.h @@ -18,7 +18,7 @@ #include // for int64_t #include -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/complex_functors.h b/paddle/fluid/operators/math/complex_functors.h index 3214adb095..48f16b87cb 100644 --- a/paddle/fluid/operators/math/complex_functors.h +++ b/paddle/fluid/operators/math/complex_functors.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/platform/complex.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cos_sim_functor.h b/paddle/fluid/operators/math/cos_sim_functor.h index 9a24bfc331..61827af950 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.h +++ b/paddle/fluid/operators/math/cos_sim_functor.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h index db19818951..e7ac1760d3 100644 --- a/paddle/fluid/operators/math/cross_entropy.h +++ b/paddle/fluid/operators/math/cross_entropy.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h index f88b4a6e41..89a1efe133 100644 --- a/paddle/fluid/operators/math/depthwise_conv.h +++ b/paddle/fluid/operators/math/depthwise_conv.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index 38bd1a3dad..def25a680c 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/gru_kernel.h b/paddle/fluid/operators/math/detail/gru_kernel.h index d9be8e8065..603f5f3426 100644 --- a/paddle/fluid/operators/math/detail/gru_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_kernel.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include "paddle/fluid/operators/math/detail/activation_functions.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" // TODO(guosheng): refine code style in gru_kernel namespace paddle { diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h index 003ec19436..33dcde4590 100644 --- a/paddle/fluid/operators/math/detail/lstm_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_kernel.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include "paddle/fluid/operators/math/detail/activation_functions.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/maxouting.h b/paddle/fluid/operators/math/maxouting.h index 50bddf73bc..ceeb85d6d3 100644 --- a/paddle/fluid/operators/math/maxouting.h +++ b/paddle/fluid/operators/math/maxouting.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/macros.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h index 4743f0dc9f..f0637a40b8 100644 --- a/paddle/fluid/operators/math/pooling.h +++ b/paddle/fluid/operators/math/pooling.h @@ -20,8 +20,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/macros.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/modified_huber_loss_op.cu b/paddle/fluid/operators/modified_huber_loss_op.cu index 3c85da3c52..ea08dc8084 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cu +++ b/paddle/fluid/operators/modified_huber_loss_op.cu @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/modified_huber_loss_op.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/modified_huber_loss_op.h b/paddle/fluid/operators/modified_huber_loss_op.h index 398676ba74..4f552edf97 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.h +++ b/paddle/fluid/operators/modified_huber_loss_op.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/multinomial_op.h b/paddle/fluid/operators/multinomial_op.h index 14cfbd2683..df4c2e9e7b 100644 --- a/paddle/fluid/operators/multinomial_op.h +++ b/paddle/fluid/operators/multinomial_op.h @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nll_loss_op.cu b/paddle/fluid/operators/nll_loss_op.cu index 03af456341..e3c99afe82 100644 --- a/paddle/fluid/operators/nll_loss_op.cu +++ b/paddle/fluid/operators/nll_loss_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math.h" #include "paddle/fluid/operators/nll_loss_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roll_op.cu b/paddle/fluid/operators/roll_op.cu index 57986d2628..7e8e37bd2e 100644 --- a/paddle/fluid/operators/roll_op.cu +++ b/paddle/fluid/operators/roll_op.cu @@ -13,11 +13,11 @@ // limitations under the License. 
#pragma once -#include "paddle/fluid/framework/array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/roll_op.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" +#include "paddle/pten/core/array.h" namespace paddle { namespace operators { @@ -28,9 +28,9 @@ using LoDTensor = framework::LoDTensor; template __global__ void RollCudaKernel(const T* input, T* output, int64_t N, - paddle::framework::Array shifts, - paddle::framework::Array strides, - paddle::framework::Array sizes) { + pten::framework::Array shifts, + pten::framework::Array strides, + pten::framework::Array sizes) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; @@ -101,9 +101,9 @@ class RollKernel #define CALL_ROLL_CUDA_KERNEL(N) \ case N: { \ - paddle::framework::Array _strides; \ - paddle::framework::Array _shifts; \ - paddle::framework::Array _sizes; \ + pten::framework::Array _strides; \ + pten::framework::Array _shifts; \ + pten::framework::Array _sizes; \ for (size_t idx = 0; idx < N; ++idx) { \ _strides[idx] = strides[idx]; \ _shifts[idx] = shifts[idx]; \ diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu index cc012230c1..de29822b8d 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -22,7 +22,7 @@ namespace cub = hipcub; #include "paddle/fluid/operators/math.h" #include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h index efe3afba18..e30b48b150 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.h +++ b/paddle/fluid/operators/smooth_l1_loss_op.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/unstack_op.h b/paddle/fluid/operators/unstack_op.h index cfd4d6bce8..413470e3db 100644 --- a/paddle/fluid/operators/unstack_op.h +++ b/paddle/fluid/operators/unstack_op.h @@ -20,7 +20,6 @@ limitations under the License. */ #if defined(__NVCC__) || defined(__HIPCC__) #include -#include "paddle/fluid/framework/array.h" #endif namespace paddle { diff --git a/paddle/fluid/platform/aligned_vector.h b/paddle/fluid/platform/aligned_vector.h index 7d014f6bdc..144c017414 100644 --- a/paddle/fluid/platform/aligned_vector.h +++ b/paddle/fluid/platform/aligned_vector.h @@ -14,7 +14,7 @@ limitations under the License. 
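In roll_op.cu above, only the array type changes; the rank-dispatch trick stays as is: a runtime rank is switched to a compile-time N so fixed-size Arrays can be passed to the kernel by value. A simplified host-only sketch of that dispatch (the real code uses the CALL_ROLL_CUDA_KERNEL macro and launches RollCudaKernel):

// Simplified sketch of roll_op.cu's rank dispatch; host-only, no CUDA launch.
#include <cstdint>
#include <vector>
#include "paddle/pten/core/array.h"

template <size_t N>
void roll_rank_n(const std::vector<int64_t>& shifts) {
  pten::framework::Array<int64_t, N> _shifts;
  for (size_t idx = 0; idx < N; ++idx) _shifts[idx] = shifts[idx];
  // ... fill _strides/_sizes the same way, then launch RollCudaKernel<T, N> ...
}

void roll_dispatch(const std::vector<int64_t>& shifts) {
  switch (shifts.size()) {
    case 1: roll_rank_n<1>(shifts); break;
    case 2: roll_rank_n<2>(shifts); break;
    case 3: roll_rank_n<3>(shifts); break;
    // ... up to DDim::kMaxRank (9) in the real macro expansion ...
    default: break;  // the real code raises an error for unsupported ranks
  }
}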
*/ #pragma once -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/eigen_ext.h b/paddle/fluid/platform/eigen_ext.h index 2b3d1693f6..872a6cf062 100644 --- a/paddle/fluid/platform/eigen_ext.h +++ b/paddle/fluid/platform/eigen_ext.h @@ -17,7 +17,7 @@ #include "paddle/fluid/platform/bfloat16.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" #include "unsupported/Eigen/CXX11/Tensor" diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h index cc9919d836..e3a3914628 100644 --- a/paddle/fluid/platform/transform.h +++ b/paddle/fluid/platform/transform.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/place.h" +#include "paddle/pten/core/hostdevice.h" #if defined(__NVCC__) || defined(__HIPCC__) #include diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu index 23f5865971..32ec113d1f 100644 --- a/paddle/fluid/platform/transform_test.cu +++ b/paddle/fluid/platform/transform_test.cu @@ -15,8 +15,8 @@ limitations under the License. */ #include #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/transform.h" +#include "paddle/pten/core/hostdevice.h" template class Scale { diff --git a/paddle/pten/api/include/tensor.h b/paddle/pten/api/include/tensor.h index c26c9ce839..d2afd703ea 100644 --- a/paddle/pten/api/include/tensor.h +++ b/paddle/pten/api/include/tensor.h @@ -42,12 +42,12 @@ class DenseTensor; namespace pten { class TensorBase; +namespace framework { +class DDim; +} // namespace framework } // namespace pten namespace paddle { -namespace framework { -class DDim; -} namespace experimental { @@ -159,9 +159,9 @@ class PADDLE_API Tensor final { /** * @brief Return the dimensions of Tensor. * - * @return paddle::framework::DDim + * @return pten::framework::DDim */ - paddle::framework::DDim dims() const; + pten::framework::DDim dims() const; /** * @brief Return the shape (dimensions) of Tensor. diff --git a/paddle/pten/api/lib/tensor.cc b/paddle/pten/api/lib/tensor.cc index cb70d26f94..0ccc9c56db 100644 --- a/paddle/pten/api/lib/tensor.cc +++ b/paddle/pten/api/lib/tensor.cc @@ -47,13 +47,13 @@ limitations under the License. */ * In the future, the necessary components will be moved to the this library, * or the corresponding components will be re-implemented. 
*/ -#include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/stream/cuda_stream.h" #include "paddle/pten/common/complex.h" #include "paddle/pten/common/float16.h" +#include "paddle/pten/core/ddim.h" namespace paddle { namespace experimental { @@ -94,10 +94,10 @@ int64_t Tensor::numel() const { return impl_->numel(); } int64_t Tensor::size() const { return impl_->numel(); } -paddle::framework::DDim Tensor::dims() const { return impl_->dims(); } +pten::framework::DDim Tensor::dims() const { return impl_->dims(); } std::vector Tensor::shape() const { - return paddle::framework::vectorize(impl_->dims()); + return pten::framework::vectorize(impl_->dims()); } void Tensor::reshape(const std::vector &shape) { diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt index facc9ac005..eabc5a19ba 100644 --- a/paddle/pten/core/CMakeLists.txt +++ b/paddle/pten/core/CMakeLists.txt @@ -15,6 +15,15 @@ cc_library(tensor_meta SRCS tensor_meta.cc DEPS enforce mixed_vector) cc_library(dense_tensor SRCS dense_tensor.cc DEPS convert_utils tensor_meta tensor_base) cc_library(pten_device_context SRCS device_context.cc DEPS tensor_base ) +cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc) +cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce) +cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) +if(WITH_GPU) + nv_test(dim_test SRCS dim_test.cu DEPS ddim) +elseif(WITH_ROCM) + hip_test(dim_test SRCS dim_test.cu DEPS ddim) +endif() + # Will remove once we implemented MKLDNN_Tensor if(WITH_MKLDNN) add_dependencies(dense_tensor mkldnn) diff --git a/paddle/fluid/framework/array.h b/paddle/pten/core/array.h similarity index 94% rename from paddle/fluid/framework/array.h rename to paddle/pten/core/array.h index 0ec9cb8112..86d222d2d5 100644 --- a/paddle/fluid/framework/array.h +++ b/paddle/pten/core/array.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,12 @@ #pragma once #include -#include "paddle/fluid/framework/unroll_array_ops.h" +#include "paddle/pten/core/unroll_array_ops.h" +// TODO(paddle-dev): Need to modify into pten/core/enforce.h #include "paddle/fluid/platform/enforce.h" -namespace paddle { +namespace pten { +namespace platform = paddle::platform; namespace framework { template @@ -146,4 +148,4 @@ class Array { }; } // namespace framework -} // namespace paddle +} // namespace pten diff --git a/paddle/fluid/framework/ddim.cc b/paddle/pten/core/ddim.cc similarity index 77% rename from paddle/fluid/framework/ddim.cc rename to paddle/pten/core/ddim.cc index 8bac8b7df6..663f92a5bf 100644 --- a/paddle/fluid/framework/ddim.cc +++ b/paddle/pten/core/ddim.cc @@ -1,22 +1,22 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/framework/ddim.h" +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/pten/core/ddim.h" #include -#include "paddle/fluid/platform/enforce.h" -namespace paddle { +namespace pten { +namespace platform = paddle::platform; namespace framework { DDim make_ddim(std::initializer_list dims) { @@ -82,10 +82,13 @@ bool contain_unknown_dim(const DDim& ddim) { DDim slice_ddim(const DDim& dim, int begin, int end) { PADDLE_ENFORCE_EQ( - (begin >= 0 && end <= dim.size()), true, + (begin >= 0 && end <= dim.size()), + true, platform::errors::InvalidArgument( - "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin, - end, dim.size())); + "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", + begin, + end, + dim.size())); // Constructor of DDim would check whether end - begin is valid return DDim(dim.Get() + begin, end - begin); } @@ -108,27 +111,34 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { } DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims) { - PADDLE_ENFORCE_GE(src.size(), 3, + PADDLE_ENFORCE_GE(src.size(), + 3, platform::errors::InvalidArgument( "The rank of src dim should be at least 3 " "in flatten_to_3d, but received %d.", src.size())); - PADDLE_ENFORCE_EQ((num_row_dims >= 1 && num_row_dims < src.size()), true, + PADDLE_ENFORCE_EQ((num_row_dims >= 1 && num_row_dims < src.size()), + true, platform::errors::InvalidArgument( "The num_row_dims should be inside [1, %d] " "in flatten_to_3d, but received %d.", - src.size() - 1, num_row_dims)); - PADDLE_ENFORCE_EQ((num_col_dims >= 2 && num_col_dims <= src.size()), true, + src.size() - 1, + num_row_dims)); + PADDLE_ENFORCE_EQ((num_col_dims >= 2 && num_col_dims <= src.size()), + true, platform::errors::InvalidArgument( "The num_col_dims should be inside [2, %d] " "in flatten_to_3d, but received %d.", - src.size(), num_col_dims)); + src.size(), + num_col_dims)); PADDLE_ENFORCE_GE( - num_col_dims, num_row_dims, + num_col_dims, + num_row_dims, platform::errors::InvalidArgument( "The num_row_dims should be less than num_col_dims in flatten_to_3d," "but received num_row_dims = %d, num_col_dims = %d.", - num_row_dims, num_col_dims)); + num_row_dims, + num_col_dims)); return DDim({product(slice_ddim(src, 0, num_row_dims)), product(slice_ddim(src, num_row_dims, num_col_dims)), @@ -169,13 +179,16 @@ DDim DDim::reshape(const std::vector& shape) const { out_dims.rank_ = shape.size(); for (size_t i = 0; i < shape.size(); ++i) { if (shape[i] == copy_dim_val) { - PADDLE_ENFORCE_LT(static_cast(i), in_dims.size(), + PADDLE_ENFORCE_LT(static_cast(i), + in_dims.size(), platform::errors::InvalidArgument( "Index %d of shape under which the value of 0 " "is stored, must be lower than the number of " "old dimensions. 
But received shape[%d] = 0, " "dimensions = %d, shape = [%s].", - i, in_dims.size(), in_dims)); + i, + in_dims.size(), + in_dims)); out_dims[i] = in_dims[i]; } else { out_dims[i] = shape[i]; @@ -190,19 +203,23 @@ DDim DDim::transpose(const std::vector& axis) const { size_t axis_size = axis.size(); auto axis_set = std::set(axis.begin(), axis.end()); - PADDLE_ENFORCE_EQ(axis_set.size(), axis_size, + PADDLE_ENFORCE_EQ(axis_set.size(), + axis_size, platform::errors::InvalidArgument( "In an axis array, elements must be unique.")); PADDLE_ENFORCE_EQ( - in_rank, axis_size, + in_rank, + axis_size, platform::errors::InvalidArgument("The input dimension's size " "should be equal to the axis's size. " "But received dimension is %d, " "axis's size is %d", - in_rank, axis_size)); + in_rank, + axis_size)); - PADDLE_ENFORCE_LT(*std::max_element(axis.begin(), axis.end()), axis_size, + PADDLE_ENFORCE_LT(*std::max_element(axis.begin(), axis.end()), + axis_size, platform::errors::InvalidArgument( "Axis values must be ranging from 0 to (dims - 1).")); @@ -214,4 +231,4 @@ DDim DDim::transpose(const std::vector& axis) const { } } // namespace framework -} // namespace paddle +} // namespace pten \ No newline at end of file diff --git a/paddle/pten/core/ddim.h b/paddle/pten/core/ddim.h new file mode 100644 index 0000000000..148c32481c --- /dev/null +++ b/paddle/pten/core/ddim.h @@ -0,0 +1,257 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include +#include +#include + +#include "paddle/pten/core/dim.h" + +namespace pten { +namespace platform = paddle::platform; +namespace framework { + +#define PADDLE_VISIT_DDIM_BASE(rank, callback) \ + case (rank): { \ + constexpr auto kRank = (rank); \ + return (callback); \ + } + +#define PADDLE_VISIT_DDIM(rank, callback) \ + switch (rank) { \ + PADDLE_VISIT_DDIM_BASE(0, callback); \ + PADDLE_VISIT_DDIM_BASE(1, callback); \ + PADDLE_VISIT_DDIM_BASE(2, callback); \ + PADDLE_VISIT_DDIM_BASE(3, callback); \ + PADDLE_VISIT_DDIM_BASE(4, callback); \ + PADDLE_VISIT_DDIM_BASE(5, callback); \ + PADDLE_VISIT_DDIM_BASE(6, callback); \ + PADDLE_VISIT_DDIM_BASE(7, callback); \ + PADDLE_VISIT_DDIM_BASE(8, callback); \ + PADDLE_VISIT_DDIM_BASE(9, callback); \ + default: \ + PADDLE_THROW(platform::errors::Unimplemented( \ + "Invalid dimension to be accessed. Now only supports access to " \ + "dimension 0 to 9, but received dimension is %d.", \ + rank)); \ + } + +template +inline void dynamic_dim_assign(const T1* in, T2* out, int n) { + PADDLE_VISIT_DDIM(n, (static_dim_assign(in, out))); +} + +/** + * \brief A dynamically sized dimension. + * + * The number of dimensions must be between [1, 9]. 
diff --git a/paddle/pten/core/ddim.h b/paddle/pten/core/ddim.h
new file mode 100644
index 0000000000..148c32481c
--- /dev/null
+++ b/paddle/pten/core/ddim.h
@@ -0,0 +1,257 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+#include <initializer_list>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "paddle/pten/core/dim.h"
+
+namespace pten {
+namespace platform = paddle::platform;
+namespace framework {
+
+#define PADDLE_VISIT_DDIM_BASE(rank, callback) \
+  case (rank): {                               \
+    constexpr auto kRank = (rank);             \
+    return (callback);                         \
+  }
+
+#define PADDLE_VISIT_DDIM(rank, callback)                                   \
+  switch (rank) {                                                           \
+    PADDLE_VISIT_DDIM_BASE(0, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(1, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(2, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(3, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(4, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(5, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(6, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(7, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(8, callback);                                    \
+    PADDLE_VISIT_DDIM_BASE(9, callback);                                    \
+    default:                                                                \
+      PADDLE_THROW(platform::errors::Unimplemented(                         \
+          "Invalid dimension to be accessed. Now only supports access to "  \
+          "dimension 0 to 9, but received dimension is %d.",                \
+          rank));                                                           \
+  }
+
+template <typename T1, typename T2>
+inline void dynamic_dim_assign(const T1* in, T2* out, int n) {
+  PADDLE_VISIT_DDIM(n, (static_dim_assign<kRank>(in, out)));
+}
+
+/**
+ * \brief A dynamically sized dimension.
+ *
+ * The number of dimensions must be between [1, 9].
+ */
+class DDim {
+ public:
+  constexpr static int kMaxRank = 9;
+
+  DDim() : rank_(1) { dim_[0] = 0; }
+
+  DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); }
+
+  DDim(const int* d, int n) : rank_(n) {
+    dynamic_dim_assign(d, dim_.GetMutable(), n);
+  }
+
+  DDim(const int64_t* d, int n) : rank_(n) {
+    dynamic_dim_assign(d, dim_.GetMutable(), n);
+  }
+
+  template <int D>
+  /*implicit*/ DDim(const Dim<D>& in) : rank_(D) {  // NOLINT
+    UnsafeCast<D>() = in;
+  }
+
+  /*implicit*/ DDim(std::initializer_list<int64_t> init_list)
+      : DDim(init_list.begin(), init_list.size()) {}
+
+  inline DDim& operator=(const DDim& ddim) { return CopyFrom(ddim); }
+
+  template <int D>
+  inline DDim& operator=(const Dim<D>& dim) {
+    rank_ = D;
+    UnsafeCast<D>() = dim;
+    return *this;
+  }
+
+  inline int64_t& operator[](int idx) { return dim_[idx]; }
+
+  inline int64_t operator[](int idx) const { return dim_[idx]; }
+
+  int64_t& at(int idx) {
+    PADDLE_ENFORCE_GE(idx,
+                      0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_,
+                          idx));
+    PADDLE_ENFORCE_LT(idx,
+                      rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_,
+                          idx));
+    return dim_[idx];
+  }
+
+  int64_t at(int idx) const {
+    PADDLE_ENFORCE_GE(idx,
+                      0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_,
+                          idx));
+    PADDLE_ENFORCE_LT(idx,
+                      rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_,
+                          idx));
+    return dim_[idx];
+  }
+
+  template <typename Visitor>
+  typename std::result_of<Visitor(Dim<0>&)>::type apply_visitor(
+      Visitor&& visitor) {
+    PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast<kRank>()));
+  }
+
+  template <typename Visitor>
+  typename std::result_of<Visitor(const Dim<0>&)>::type apply_visitor(
+      Visitor&& visitor) const {
+    PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast<kRank>()));
+  }
+
+  bool operator==(const DDim& d) const;
+
+  bool operator!=(const DDim& d) const;
+
+  inline const int64_t* Get() const { return dim_.Get(); }
+
+  inline int64_t* GetMutable() { return dim_.GetMutable(); }
+
+  inline int size() const { return rank_; }
+
+  std::string to_str() const;
+
+  DDim reshape(const std::vector<int>& shape) const;
+
+  DDim transpose(const std::vector<int>& axis) const;
+
+ private:
+  template <int D>
+  inline Dim<D>& UnsafeCast() {
+    static_assert(D >= 0 && D <= kMaxRank, "Invalid rank");
+    auto* p = static_cast<void*>(&dim_);
+    return *reinterpret_cast<Dim<D>*>(p);
+  }
+
+  template <int D>
+  inline const Dim<D>& UnsafeCast() const {
+    static_assert(D >= 0 && D <= kMaxRank, "Invalid rank");
+    auto* p = static_cast<const void*>(&dim_);
+    return *reinterpret_cast<const Dim<D>*>(p);
+  }
+
+  inline DDim& CopyFrom(const DDim& ddim) {
+    PADDLE_VISIT_DDIM(ddim.rank_, (*this = ddim.UnsafeCast<kRank>()));
+  }
+
+  friend DDim stride(const DDim& ddim);
+  friend DDim stride_numel(const DDim& ddim);
+
+ private:
+  Dim<kMaxRank> dim_;
+  int rank_;
+};
+
+#undef PADDLE_VISIT_DDIM_BASE
+#undef PADDLE_VISIT_DDIM
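The PADDLE_VISIT_DDIM machinery is easiest to see with a concrete visitor; this sketch (not part of the patch, functor name hypothetical) shows how a runtime rank_ is dispatched to a compile-time Dim<kRank>:

    // Hypothetical visitor: computes the product of a statically ranked Dim.
    struct ProductVisitor {
      template <int D>
      int64_t operator()(const pten::framework::Dim<D>& dim) const {
        int64_t p = 1;
        for (int i = 0; i < D; ++i) p *= dim[i];
        return p;
      }
    };

    pten::framework::DDim d = pten::framework::make_ddim({2, 3, 4});
    // The switch on rank_ selects the Dim<3> instantiation.
    int64_t n = d.apply_visitor(ProductVisitor{});  // 24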
+
+/**
+ * \brief Make a DDim from std::vector<int64_t>
+ *
+ * \param dims A vector of ints. Must be sized between [1, 9]
+ */
+DDim make_ddim(const std::vector<int64_t>& dims);
+
+DDim make_ddim(const std::vector<int>& dims);
+
+/**
+ * \brief Make a DDim from an initializer list
+ *
+ * \param dims An initializer list of ints. Must be sized between [1, 9]
+ *
+ */
+DDim make_ddim(std::initializer_list<int64_t> dims);
+
+template <typename T = int64_t>
+std::vector<T> vectorize(const DDim& ddim) {
+  std::vector<T> result(DDim::kMaxRank);
+  dynamic_dim_assign(ddim.Get(), result.data(), ddim.size());
+  result.resize(ddim.size());
+  return result;
+}
+
+int64_t product(const DDim& ddim);
+
+bool contain_unknown_dim(const DDim& ddim);
+
+/**
+ * \brief Slice a ddim
+ *
+ * Slice dim with [begin, end).
+ * e.g.  DDim d = make_ddim({1,2,3,4,5});
+ *       slice_ddim(d, 1, 3); ====> {2,3}
+ */
+DDim slice_ddim(const DDim& dim, int begin, int end);
+
+/**
+ * \brief What is the length of this dimension?
+ *
+ * \param Dynamic dimension to inspect
+ */
+
+int arity(const DDim& ddim);
+
+std::ostream& operator<<(std::ostream&, const DDim&);
+
+/**
+* \brief Flatten dim to 3d
+* e.g., DDim d = make_ddim({1, 2, 3, 4, 5, 6})
+*       flatten_to_3d(d, 2, 4); ===> {1*2, 3*4, 5*6} ===> {2, 12, 30}
+*/
+DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims);
+
+// Reshape a tensor to a matrix. The matrix's first dimension (column length)
+// will be the product of tensor's first `num_col_dims` dimensions.
+DDim flatten_to_2d(const DDim& src, int num_col_dims);
+
+DDim flatten_to_1d(const DDim& src);
+
+DDim stride(const DDim& ddim);
+
+DDim stride_numel(const DDim& ddim);
+}  // namespace framework
+}  // namespace pten
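A round trip between DDim and std::vector using the declarations above (illustrative only, not part of the patch):

    pten::framework::DDim d = pten::framework::make_ddim({16, 32, 64});
    std::vector<int64_t> v = pten::framework::vectorize(d);    // {16, 32, 64}
    std::vector<int> vi = pten::framework::vectorize<int>(d);  // narrowed copy
    int64_t numel = pten::framework::product(d);               // 32768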
diff --git a/paddle/pten/core/ddim_test.cc b/paddle/pten/core/ddim_test.cc
new file mode 100644
index 0000000000..1903bbfdff
--- /dev/null
+++ b/paddle/pten/core/ddim_test.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <sstream>
+
+#include "gtest/gtest.h"
+#include "paddle/pten/core/ddim.h"
+
+TEST(DDim, Equality) {
+  // construct a DDim from an initialization list
+  pten::framework::DDim ddim = pten::framework::make_ddim({9, 1, 5});
+  EXPECT_EQ(ddim[0], 9);
+  EXPECT_EQ(ddim[1], 1);
+  EXPECT_EQ(ddim[2], 5);
+
+  // construct a DDim from a vector
+  std::vector<int64_t> vec({9, 1, 5});
+  pten::framework::DDim vddim = pten::framework::make_ddim(vec);
+  EXPECT_EQ(vddim[0], 9);
+  EXPECT_EQ(vddim[1], 1);
+  EXPECT_EQ(vddim[2], 5);
+
+  // mutate a DDim
+  ddim[1] = 2;
+  EXPECT_EQ(ddim[1], 2);
+  ddim[0] = 6;
+  EXPECT_EQ(ddim[0], 6);
+
+  // vectorize a DDim
+  std::vector<int64_t> res_vec = pten::framework::vectorize(vddim);
+  EXPECT_EQ(res_vec[0], 9);
+  EXPECT_EQ(res_vec[1], 1);
+  EXPECT_EQ(res_vec[2], 5);
+  pten::framework::Dim<3> d(3, 2, 1);
+  res_vec = pten::framework::vectorize(pten::framework::DDim(d));
+  EXPECT_EQ(res_vec[0], 3);
+  EXPECT_EQ(res_vec[1], 2);
+  EXPECT_EQ(res_vec[2], 1);
+
+  // arity of a DDim
+  EXPECT_EQ(pten::framework::arity(ddim), 3);
+  EXPECT_EQ(ddim.size(), 3);
+
+  // product of a DDim
+  EXPECT_EQ(pten::framework::product(vddim), 45);
+  EXPECT_EQ(pten::framework::product(pten::framework::make_ddim({3, 2, 5, 3})),
+            90);
+
+  // slice a DDim
+  pten::framework::DDim ddim2 = pten::framework::make_ddim({1, 2, 3, 4, 5, 6});
+  pten::framework::DDim ss = pten::framework::slice_ddim(ddim2, 2, 5);
+  EXPECT_EQ(arity(ss), 3);
+  EXPECT_EQ(ss[0], 3);
+  EXPECT_EQ(ss[1], 4);
+  EXPECT_EQ(ss[2], 5);
+  pten::framework::DDim ss2 = pten::framework::slice_ddim(ddim2, 0, 6);
+  EXPECT_EQ(arity(ss2), 6);
+  EXPECT_EQ(ss2[0], 1);
+  EXPECT_EQ(ss2[1], 2);
+  EXPECT_EQ(ss2[2], 3);
+  EXPECT_EQ(ss2[3], 4);
+  EXPECT_EQ(ss2[4], 5);
+  EXPECT_EQ(ss2[5], 6);
+}
+
+TEST(DDim, Print) {
+  // print a DDim
+  std::stringstream ss;
+  pten::framework::DDim ddim = pten::framework::make_ddim({2, 3, 4});
+  ss << ddim;
+  EXPECT_EQ("2, 3, 4", ss.str());
+}
diff --git a/paddle/pten/core/dim.h b/paddle/pten/core/dim.h
new file mode 100644
index 0000000000..8dd984891a
--- /dev/null
+++ b/paddle/pten/core/dim.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <iostream>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+
+#include "paddle/pten/core/array.h"
+#include "paddle/pten/core/hostdevice.h"
+
+namespace pten {
+namespace framework {
+
+// Statically sized, statically indexed dimension
+template <int D>
+class Dim : public Array<int64_t, D> {
+ public:
+  static_assert(D >= 0, "D must be not less than 0");
+
+  static constexpr int kRank = D;
+  using BaseClass = Array<int64_t, D>;
+
+  inline Dim(int64_t head, const Dim<D - 1>& tail) {
+    (*this)[0] = head;
+    new (this->GetMutable() + 1) Dim<D - 1>(tail);
+  }
+
+  template <typename... Args>
+  HOSTDEVICE explicit Dim(int64_t head, Args... args)
+      : BaseClass(head, args...) {}
+
+  /** Construct a Dim with each dimension set to the given index */
+  HOSTDEVICE explicit Dim(int64_t idx) { this->Fill(idx); }
+
+  HOSTDEVICE Dim() = default;
+
+  HOST std::string to_string() const;
+};
+
+// Product of a Dim
+template <int D>
+HOSTDEVICE inline int64_t product(const Dim<D>& a) {
+  return UnrollProduct<D>::Run(a.Get());
+}
+
+/**
+ * Helper function to create a Dim
+ *
+ * \param idxes The type of Dim constructed depends on the number of params
+ *
+ */
+
+template <typename... Args>
+HOSTDEVICE inline Dim<sizeof...(Args)> make_dim(Args... idxes) {
+  return Dim<sizeof...(Args)>(idxes...);
+}
+
+// Allows us to output a Dim
+template <int D>
+inline std::ostream& operator<<(std::ostream& os, const Dim<D>& d) {
+  os << d[0];
+  for (int i = 1; i < D; ++i) {
+    os << ", " << d[i];
+  }
+  return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) {
+  return os;
+}
+
+template <int D>
+HOST std::string Dim<D>::to_string() const {
+  std::stringstream stream;
+  stream << *this;
+  return stream.str();
+}
+
+template <int D, typename T1, typename T2>
+inline void static_dim_assign(const T1* in, T2* out) {
+  UnrollAssign<D>::Run(in, out);
+}
+
+}  // namespace framework
+}  // namespace pten
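Unlike DDim, a Dim fixes its rank at compile time, so helpers such as product can unroll fully; a short sketch (illustrative only, not part of the patch):

    auto d3 = pten::framework::make_dim(2, 3, 4);  // deduces Dim<3>
    static_assert(decltype(d3)::kRank == 3, "rank is a compile-time constant");
    int64_t n = pten::framework::product(d3);      // unrolled multiply -> 24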
diff --git a/paddle/fluid/framework/dim_test.cu b/paddle/pten/core/dim_test.cu
similarity index 62%
rename from paddle/fluid/framework/dim_test.cu
rename to paddle/pten/core/dim_test.cu
index b3c26b10c6..0f8d71c5d3 100644
--- a/paddle/fluid/framework/dim_test.cu
+++ b/paddle/pten/core/dim_test.cu
@@ -1,42 +1,43 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+
 #include <thrust/device_vector.h>
 #include <sstream>
 
 #include "gtest/gtest.h"
-#include "paddle/fluid/framework/dim.h"
+#include "paddle/pten/core/dim.h"
 
-__global__ void test(paddle::framework::Dim<2>* o) {
-  o[0] = paddle::framework::make_dim(5, 6);
+__global__ void test(pten::framework::Dim<2>* o) {
+  o[0] = pten::framework::make_dim(5, 6);
 }
 
 __global__ void dyn_idx_gpu(int64_t* o) {
-  auto d = paddle::framework::make_dim(5, 6);
+  auto d = pten::framework::make_dim(5, 6);
   o[0] = d[1];
 }
 
 TEST(Dim, Equality) {
   // construct a Dim on the CPU
-  auto a = paddle::framework::make_dim(3, 4);
+  auto a = pten::framework::make_dim(3, 4);
   EXPECT_EQ(a[0], 3);
   EXPECT_EQ(a[1], 4);
 
   // construct a Dim on the GPU
-  thrust::device_vector<paddle::framework::Dim<2>> t(2);
+  thrust::device_vector<pten::framework::Dim<2>> t(2);
 #ifdef PADDLE_WITH_HIP
-  hipLaunchKernelGGL(test, dim3(1), dim3(1), 0, 0,
-                     thrust::raw_pointer_cast(t.data()));
+  hipLaunchKernelGGL(
+      test, dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(t.data()));
 #else
   test<<<1, 1>>>(thrust::raw_pointer_cast(t.data()));
 #endif
@@ -45,10 +46,10 @@ TEST(Dim, Equality) {
   EXPECT_EQ(a[1], 6);
 
   // product
-  EXPECT_EQ(paddle::framework::product(a), 30);
+  EXPECT_EQ(pten::framework::product(a), 30);
 
   // mutate a Dim
-  auto b = paddle::framework::make_dim(7, 8);
+  auto b = pten::framework::make_dim(7, 8);
   b[1] = 10;
   EXPECT_EQ(b[0], 7);
   EXPECT_EQ(b[1], 10);
@@ -61,8 +62,8 @@ TEST(Dim, Equality) {
   // dynamic access on GPU
   thrust::device_vector<int64_t> r(1);
 #ifdef PADDLE_WITH_HIP
-  hipLaunchKernelGGL(dyn_idx_gpu, dim3(1), dim3(1), 0, 0,
-                     thrust::raw_pointer_cast(r.data()));
+  hipLaunchKernelGGL(
+      dyn_idx_gpu, dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(r.data()));
 #else
   dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data()));
 #endif
@@ -71,9 +72,9 @@ TEST(Dim, Equality) {
 }
 
 TEST(Dim, Bool) {
-  auto a = paddle::framework::make_dim(3, 4);
-  auto b = paddle::framework::make_dim(5, 6);
-  auto c = paddle::framework::make_dim(3, 4);
+  auto a = pten::framework::make_dim(3, 4);
+  auto b = pten::framework::make_dim(5, 6);
+  auto c = pten::framework::make_dim(3, 4);
 
   // comparison
   EXPECT_TRUE(a == a);
@@ -84,13 +85,13 @@ TEST(Dim, Print) {
   {
     std::stringstream ss;
-    auto a = paddle::framework::make_dim(2, 3);
+    auto a = pten::framework::make_dim(2, 3);
     ss << a;
     EXPECT_EQ(ss.str(), "2, 3");
   }
   {
     std::stringstream ss;
-    ss << paddle::framework::make_dim(8);
+    ss << pten::framework::make_dim(8);
     EXPECT_EQ(ss.str(), "8");
   }
-}
+}
\ No newline at end of file
diff --git a/paddle/fluid/platform/hostdevice.h b/paddle/pten/core/hostdevice.h
similarity index 89%
rename from paddle/fluid/platform/hostdevice.h
rename to paddle/pten/core/hostdevice.h
index 65005a5adb..08fe312528 100644
--- a/paddle/fluid/platform/hostdevice.h
+++ b/paddle/pten/core/hostdevice.h
@@ -1,16 +1,17 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
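For context, hostdevice.h supplies the HOST/HOSTDEVICE qualifiers used throughout dim.h; the hunk above only shows the top of the file, so treat the following as an assumed sketch of what such a header typically contains:

    #pragma once
    #ifdef __HIPCC__
    #include <hip/hip_runtime.h>
    #endif

    #if (defined(__CUDACC__) || defined(__HIPCC__))
    #define HOSTDEVICE __host__ __device__
    #define DEVICE __device__
    #define HOST __host__
    #else
    // On CPU-only builds the qualifiers compile away.
    #define HOSTDEVICE
    #define DEVICE
    #define HOST
    #endif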
+
 #pragma once
 
 #ifdef __HIPCC__
diff --git a/paddle/pten/core/tensor_base.h b/paddle/pten/core/tensor_base.h
index 528a52cee8..662553cbcb 100644
--- a/paddle/pten/core/tensor_base.h
+++ b/paddle/pten/core/tensor_base.h
@@ -14,11 +14,11 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/pten/common/backend.h"
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/common/layout.h"
+#include "paddle/pten/core/ddim.h"
 #include "paddle/pten/core/storage.h"
 #include "paddle/pten/core/utils/type_registry.h"
 
@@ -28,7 +28,7 @@ class TensorBase {
  public:
   using DataType = paddle::experimental::DataType;
   using DataLayout = paddle::experimental::DataLayout;
-  using DDim = paddle::framework::DDim;
+  using DDim = pten::framework::DDim;
   using Place = paddle::platform::Place;
 
   virtual ~TensorBase() = default;
diff --git a/paddle/pten/core/tensor_meta.h b/paddle/pten/core/tensor_meta.h
index 2df6b48b67..ac3f17267c 100644
--- a/paddle/pten/core/tensor_meta.h
+++ b/paddle/pten/core/tensor_meta.h
@@ -21,7 +21,7 @@ limitations under the License. */
 #include "paddle/pten/common/layout.h"
 
 // See Note [ Why still include the fluid headers? ]
-#include "paddle/fluid/framework/ddim.h"
+#include "paddle/pten/core/ddim.h"
 
 // Note: mixed_vector include many header now, LoD will be
 // used on CUDA device? Can we use small_vector here?
@@ -30,7 +30,7 @@ limitations under the License. */
 
 namespace pten {
 
-using DDim = paddle::framework::DDim;
+using DDim = pten::framework::DDim;
 using LoD = std::vector<std::vector<size_t>>;
 
 /// \brief The meta data of dense tensor. Take the structure type
 /// and use all default operations.
diff --git a/paddle/fluid/framework/unroll_array_ops.h b/paddle/pten/core/unroll_array_ops.h
similarity index 96%
rename from paddle/fluid/framework/unroll_array_ops.h
rename to paddle/pten/core/unroll_array_ops.h
index a9c047cc6c..fb0358375a 100644
--- a/paddle/fluid/framework/unroll_array_ops.h
+++ b/paddle/pten/core/unroll_array_ops.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 
 #include <cstddef>
 #include <type_traits>
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
-namespace paddle {
+namespace pten {
 namespace framework {
 namespace detail {
@@ -130,4 +130,4 @@ template <size_t N>
 using UnrollProduct = detail::UnrollProduct<0, N, N == 0>;
 
 }  // namespace framework
-}  // namespace paddle
+}  // namespace pten
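The unroll helpers renamed above replace runtime loops over a fixed rank with compile-time expansion; a hedged usage sketch (illustrative only, not part of the patch):

    // UnrollAssign<D>::Run(in, out) behaves like
    //   for (size_t i = 0; i < D; ++i) out[i] = in[i];
    // but is expanded at compile time for the fixed rank D.
    int64_t in[3] = {2, 3, 4};
    int64_t out[3];
    pten::framework::UnrollAssign<3>::Run(in, out);          // out = {2, 3, 4}
    int64_t p = pten::framework::UnrollProduct<3>::Run(in);  // 24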
-#include "paddle/fluid/framework/unroll_array_ops.h" +#include "paddle/pten/core/unroll_array_ops.h" #include #include -namespace paddle { +namespace pten { namespace framework { template @@ -79,4 +79,4 @@ TEST(unroll_ops, product) { } } // namespace framework -} // namespace paddle +} // namespace pten \ No newline at end of file diff --git a/paddle/pten/infermeta/binary.cc b/paddle/pten/infermeta/binary.cc index ea587806bf..083fb0fca2 100644 --- a/paddle/pten/infermeta/binary.cc +++ b/paddle/pten/infermeta/binary.cc @@ -64,8 +64,8 @@ DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, const DenseTensorMeta& y_meta, bool trans_x, bool trans_y) { - std::vector dims_x = paddle::framework::vectorize(x_meta.dims); - std::vector dims_y = paddle::framework::vectorize(y_meta.dims); + std::vector dims_x = pten::framework::vectorize(x_meta.dims); + std::vector dims_y = pten::framework::vectorize(y_meta.dims); auto ndims_x = dims_x.size(); auto ndims_y = dims_y.size(); PADDLE_ENFORCE_GT(ndims_x, @@ -125,7 +125,7 @@ DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, new_dims.push_back(1); } - auto ddim_out = paddle::framework::make_ddim(new_dims); + auto ddim_out = pten::framework::make_ddim(new_dims); return {x_meta.dtype, ddim_out, x_meta.layout}; } @@ -169,7 +169,7 @@ DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, out_dims_array.data(), max_dim, axis); - return_meta.dims = paddle::framework::make_ddim(out_dims_array); + return_meta.dims = pten::framework::make_ddim(out_dims_array); } return_meta.lod = x_meta.lod; return return_meta; diff --git a/paddle/pten/infermeta/nullary.cc b/paddle/pten/infermeta/nullary.cc index 731e69e609..19e11f049f 100644 --- a/paddle/pten/infermeta/nullary.cc +++ b/paddle/pten/infermeta/nullary.cc @@ -20,14 +20,14 @@ namespace pten { DenseTensorMeta CreateInferMeta(const std::vector& shape, DataType dtype, DataLayout layout) { - const auto& out_dims = paddle::framework::make_ddim(shape); + const auto& out_dims = pten::framework::make_ddim(shape); return {dtype, out_dims, layout}; } DenseTensorMeta CreateInferMeta(const ScalarArray& shape, DataType dtype, DataLayout layout) { - const auto& out_dims = paddle::framework::make_ddim(shape.GetData()); + const auto& out_dims = pten::framework::make_ddim(shape.GetData()); return {dtype, out_dims, layout}; } diff --git a/paddle/pten/infermeta/unary.cc b/paddle/pten/infermeta/unary.cc index 843a78f341..27e1dc9511 100644 --- a/paddle/pten/infermeta/unary.cc +++ b/paddle/pten/infermeta/unary.cc @@ -23,7 +23,7 @@ DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta) { } DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta) { - const auto& out_dims = paddle::framework::make_ddim({1}); + const auto& out_dims = pten::framework::make_ddim({1}); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); return return_meta; } @@ -63,7 +63,7 @@ DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, for (int i = stop_axis + 1; i < in_dims_size; i++) { out_shape.push_back(x_dims[i]); } - const auto& out_dims = paddle::framework::make_ddim(out_shape); + const auto& out_dims = pten::framework::make_ddim(out_shape); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); if (x_dims[0] == return_meta.dims[0]) { @@ -89,10 +89,10 @@ DenseTensorMeta CreateLikeInferMeta(const DenseTensorMeta& x_meta, layout == DataLayout::UNDEFINED ? 
x_meta.layout : layout}; } -static paddle::framework::DDim ValidateShape( - const std::vector shape, const paddle::framework::DDim& in_dims) { - const int64_t in_size = paddle::framework::product(in_dims); - auto in_dims_vec = paddle::framework::vectorize(in_dims); +static pten::framework::DDim ValidateShape( + const std::vector shape, const pten::framework::DDim& in_dims) { + const int64_t in_size = pten::framework::product(in_dims); + auto in_dims_vec = pten::framework::vectorize(in_dims); bool all_positive = std::all_of(in_dims_vec.cbegin(), in_dims_vec.cend(), [](int64_t i) { return i > 0; }); @@ -112,7 +112,7 @@ static paddle::framework::DDim ValidateShape( paddle::platform::errors::InvalidArgument( "Only one dimension value of 'shape' in ReshapeOp can " "be -1. But received shape = [%s], shape[%d] is also -1.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i)); unk_dim_idx = i; } else if (shape[i] == copy_dim_val) { @@ -124,7 +124,7 @@ static paddle::framework::DDim ValidateShape( "the input tensor X's dimensions. " "But received shape = [%s], shape[%d] = 0, X's shape = [%s], " "X's dimensions = %d.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i, in_dims, in_dims.size())); @@ -136,7 +136,7 @@ static paddle::framework::DDim ValidateShape( "Each dimension value of 'shape' in ReshapeOp must not " "be negative except one unknown dimension. " "But received shape = [%s], shape[%d] = %d.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i, shape[i])); } @@ -165,7 +165,7 @@ static paddle::framework::DDim ValidateShape( "'shape' is [%s], known capacity of 'shape' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } else { output_shape[unk_dim_idx] = -1; @@ -183,7 +183,7 @@ static paddle::framework::DDim ValidateShape( "[%s], the capacity of 'shape' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } } @@ -202,11 +202,11 @@ static paddle::framework::DDim ValidateShape( "capacity of 'Out' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } - return paddle::framework::make_ddim(output_shape); + return pten::framework::make_ddim(output_shape); } DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, @@ -267,7 +267,7 @@ DenseTensorMeta ReduceInferMeta(const DenseTensorMeta& x_meta, out_dim_vector.push_back(1); } } - DDim out_dim = paddle::framework::make_ddim(out_dim_vector); + DDim out_dim = pten::framework::make_ddim(out_dim_vector); DataType out_dtype; if (dtype != DataType::UNDEFINED) { diff --git a/paddle/pten/kernels/cpu/elementwise.h b/paddle/pten/kernels/cpu/elementwise.h index f048678111..e4f426d3f8 100644 --- a/paddle/pten/kernels/cpu/elementwise.h +++ b/paddle/pten/kernels/cpu/elementwise.h @@ -583,8 +583,8 @@ void CommonElementwiseBroadcastBackward(const CPUContext& ctx, } VLOG(3) << "CommonElementwiseBroadcastBackward xdims:" - << paddle::framework::make_ddim(x_dims_array) - << " ydim:" << paddle::framework::make_ddim(y_dims_array); + << pten::framework::make_ddim(x_dims_array) + << " ydim:" << pten::framework::make_ddim(y_dims_array); CommonGradBroadcastCPU(x, y, diff --git a/paddle/pten/kernels/cpu/reduce.h b/paddle/pten/kernels/cpu/reduce.h index b38f17aa02..86443c254b 100644 --- a/paddle/pten/kernels/cpu/reduce.h +++ b/paddle/pten/kernels/cpu/reduce.h @@ -50,13 +50,13 @@ void 
ReduceFunctor(const DeviceContext& context, DDim out_dims = output->dims(); if (keep_dim && x_rank > 1) { const int kDelFlag = -2; - auto dims_vector = paddle::framework::vectorize(out_dims); + auto dims_vector = pten::framework::vectorize(out_dims); for (size_t i = 0; i < dims_ref.size(); ++i) { dims_vector[dims_ref[i]] = kDelFlag; } dims_vector.erase(remove(dims_vector.begin(), dims_vector.end(), kDelFlag), dims_vector.end()); - out_dims = paddle::framework::make_ddim(dims_vector); + out_dims = pten::framework::make_ddim(dims_vector); } auto& place = *context.eigen_device(); Functor functor; diff --git a/paddle/pten/kernels/empty_kernel.cc b/paddle/pten/kernels/empty_kernel.cc index d6a155dca0..2deac0146c 100644 --- a/paddle/pten/kernels/empty_kernel.cc +++ b/paddle/pten/kernels/empty_kernel.cc @@ -24,7 +24,7 @@ template void EmptyKernel(const Context& dev_ctx, const ScalarArray& shape, DenseTensor* out) { - out->ResizeAndAllocate(paddle::framework::make_ddim(shape.GetData())); + out->ResizeAndAllocate(pten::framework::make_ddim(shape.GetData())); } template diff --git a/paddle/pten/kernels/flatten_grad_kernel.cc b/paddle/pten/kernels/flatten_grad_kernel.cc index e45ac516e1..cbbf62f199 100644 --- a/paddle/pten/kernels/flatten_grad_kernel.cc +++ b/paddle/pten/kernels/flatten_grad_kernel.cc @@ -25,8 +25,7 @@ void FlattenGradKernel(const Context& dev_ctx, const DenseTensor& xshape, DenseTensor* x_grad) { auto xshape_dims = xshape.dims(); - auto x_dims = - paddle::framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + auto x_dims = pten::framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); pten::Copy(dev_ctx, out_grad, false, x_grad); x_grad->ResizeAndAllocate(x_dims); } diff --git a/paddle/pten/kernels/funcs/common_shape.h b/paddle/pten/kernels/funcs/common_shape.h index 8693fd2b36..6bb45ad199 100644 --- a/paddle/pten/kernels/funcs/common_shape.h +++ b/paddle/pten/kernels/funcs/common_shape.h @@ -26,7 +26,7 @@ inline void SetXShape(const DenseTensor &x, DenseTensor *xshape) { for (int i = 0; i < in_dims.size(); ++i) { xshape_dims[i + 1] = in_dims[i]; } - xshape->ResizeAndAllocate(paddle::framework::make_ddim(xshape_dims)); + xshape->ResizeAndAllocate(pten::framework::make_ddim(xshape_dims)); xshape->ResetLoD(x.meta().lod); } diff --git a/paddle/pten/kernels/funcs/elementwise_base.h b/paddle/pten/kernels/funcs/elementwise_base.h index 7396c64de9..47924c4e2a 100644 --- a/paddle/pten/kernels/funcs/elementwise_base.h +++ b/paddle/pten/kernels/funcs/elementwise_base.h @@ -36,10 +36,10 @@ enum ElementwiseType { kUnary = 1, kBinary = 2, kTernary = 3, kAny = -1 }; for supporting multiple-output feature in elementwise system.*/ template using ConditionalT = - typename std::conditional_t>; + typename std::conditional_t>; namespace funcs { -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; template struct ElemwiseGradNoBroadcast { @@ -303,9 +303,9 @@ inline DDim trim_trailing_singular_dims(const DDim &dims) { trim_dims[i] = dims[i]; } if (trim_dims.size() == 0) { - return DDim(paddle::framework::make_dim()); + return DDim(pten::framework::make_dim()); } - DDim actual_dims = paddle::framework::make_ddim(trim_dims); + DDim actual_dims = pten::framework::make_ddim(trim_dims); return actual_dims; } @@ -377,7 +377,7 @@ void ElemwiseGradComputeNoBroadcast(const DeviceContext &dev_ctx, DenseTensor *dy, DX_OP dx_op, DY_OP dy_op) { - size_t N = static_cast(paddle::framework::product(x_dim)); + size_t N = static_cast(pten::framework::product(x_dim)); 
paddle::platform::ForRange for_range(dev_ctx, N); for_range(ElemwiseGradNoBroadcast{ x.data(), @@ -462,7 +462,7 @@ struct ElementwisePrimitiveCaller { template struct ElementwiseWriteDataCaller { __device__ __forceinline__ void operator()( - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, ConditionalT src[VecSize], int block_offset, int num) { @@ -485,7 +485,7 @@ struct ElementwiseWriteDataCaller { template struct ElementwiseWriteDataCaller { __device__ __forceinline__ void operator()( - paddle::framework::Array<_ptr_ OutT *, 1> outs, + pten::framework::Array<_ptr_ OutT *, 1> outs, OutT src[VecSize], int block_offset, int num) { @@ -502,8 +502,8 @@ template __device__ void VectorizedElementwiseKernelImpl( - const paddle::framework::Array &in, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + const pten::framework::Array &in, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, int num, int data_offset, Functor func) { @@ -537,8 +537,8 @@ template __global__ void VectorizedElementwiseKernel( - paddle::framework::Array ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array ins, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, int size, int main_offset, Functor func) { @@ -578,8 +578,8 @@ void ElementwiseCudaKernel(const KPDevice &ctx, std::vector *outs, Functor func) { auto numel = ins[0]->numel(); - paddle::framework::Array ins_data; - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs_data; + pten::framework::Array ins_data; + pten::framework::Array<_ptr_ OutT *, NumOuts> outs_data; for (int i = 0; i < Arity; ++i) { ins_data[i] = ins[i]->data(); diff --git a/paddle/pten/kernels/funcs/elementwise_functor.h b/paddle/pten/kernels/funcs/elementwise_functor.h index 6b89902456..6d139d6853 100644 --- a/paddle/pten/kernels/funcs/elementwise_functor.h +++ b/paddle/pten/kernels/funcs/elementwise_functor.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/pten/common/float16.h" +#include "paddle/pten/core/hostdevice.h" namespace pten { namespace funcs { diff --git a/paddle/pten/kernels/funcs/transpose.cc b/paddle/pten/kernels/funcs/transpose.cc index 77d26fcbc3..90a6859a85 100644 --- a/paddle/pten/kernels/funcs/transpose.cc +++ b/paddle/pten/kernels/funcs/transpose.cc @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/pten/kernels/funcs/transpose.h" -#include "paddle/fluid/framework/ddim.h" #include "paddle/pten/backends/cpu/cpu_context.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" // See Note [ Why still include the fluid headers? ] @@ -33,8 +33,8 @@ struct TransposeNormal { pten::DenseTensor* out, const std::vector& axis) { const int rank = axis.size(); - auto in_stride = paddle::framework::stride(in.dims()); - auto out_stride = paddle::framework::stride(out->dims()); + auto in_stride = pten::framework::stride(in.dims()); + auto out_stride = pten::framework::stride(out->dims()); const T* in_ptr = in.data(); T* out_ptr = out->mutable_data(); diff --git a/paddle/pten/kernels/funcs/transpose.cu b/paddle/pten/kernels/funcs/transpose.cu index 045bfdbdb0..474a7c4ea4 100644 --- a/paddle/pten/kernels/funcs/transpose.cu +++ b/paddle/pten/kernels/funcs/transpose.cu @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
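The TransposeNormal functor above maps every linear output index back to a source offset through the two stride arrays; a simplified host-side sketch of that index arithmetic (names and signature are illustrative, assuming row-major strides as produced by stride()):

    int64_t src_offset_for(int64_t out_idx,
                           const pten::framework::DDim& in_stride,
                           const pten::framework::DDim& out_stride,
                           const std::vector<int>& axis) {
      int64_t remaining = out_idx;
      int64_t src = 0;
      for (int d = 0; d < static_cast<int>(axis.size()); ++d) {
        const int64_t coord = remaining / out_stride[d];  // coordinate on dim d
        remaining %= out_stride[d];
        src += coord * in_stride[axis[d]];  // same coordinate in source layout
      }
      return src;
    }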
-#include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/pten/backends/gpu/gpu_context.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/kernels/funcs/transpose.h" @@ -58,8 +58,8 @@ struct TransposeNormal { pten::DenseTensor* out, const std::vector& axis) { const int rank = axis.size(); - auto in_stride = paddle::framework::stride(in.dims()); - auto out_stride = paddle::framework::stride(out->dims()); + auto in_stride = pten::framework::stride(in.dims()); + auto out_stride = pten::framework::stride(out->dims()); auto* in_ptr = in.data(); auto* out_ptr = out->mutable_data(); diff --git a/paddle/pten/kernels/funcs/transpose.h b/paddle/pten/kernels/funcs/transpose.h index d0e4dafe2c..0cb2b4289f 100644 --- a/paddle/pten/kernels/funcs/transpose.h +++ b/paddle/pten/kernels/funcs/transpose.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/fluid/framework/ddim.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/fluid/operators/eigen/eigen_function.h" diff --git a/paddle/pten/kernels/gpu/elementwise.h b/paddle/pten/kernels/gpu/elementwise.h index c3ff91e7b1..def54e2484 100644 --- a/paddle/pten/kernels/gpu/elementwise.h +++ b/paddle/pten/kernels/gpu/elementwise.h @@ -130,14 +130,14 @@ struct DimensionsTransform { public: explicit DimensionsTransform(const std::vector &ins, - const paddle::framework::DDim &dims, + const pten::framework::DDim &dims, int axis) { const int N = ins.size(); dim_size = dims.size(); - out_dims = paddle::framework::vectorize(dims); + out_dims = pten::framework::vectorize(dims); in_dims.resize(N); for (int j = 0; j < N; ++j) { - in_dims[j] = paddle::framework::vectorize(ins[j]->dims()); + in_dims[j] = pten::framework::vectorize(ins[j]->dims()); } InputDimensionsExtend(N, axis); @@ -214,11 +214,11 @@ template __device__ void ElementwiseBroadcastKernelImpl( - const paddle::framework::Array &ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, - const paddle::framework::Array &use_broadcast, + const pten::framework::Array &ins, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, + const pten::framework::Array &use_broadcast, uint32_t numel, - const paddle::framework::Array, Arity> + const pten::framework::Array, Arity> &configs, int num, int block_offset, @@ -259,12 +259,11 @@ template __global__ void ElementwiseBroadcastKernel( - paddle::framework::Array ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, - paddle::framework::Array use_broadcast, + pten::framework::Array ins, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array use_broadcast, uint32_t numel, - paddle::framework::Array, Arity> - configs, + pten::framework::Array, Arity> configs, int main_offset, int tail_tid, Functor func) { @@ -345,10 +344,10 @@ void LaunchKernel(const KPDevice &ctx, Functor func, DimensionsTransform merge_dims) { int numel = (*outs)[0]->numel(); - paddle::framework::Array, Arity> configs; - paddle::framework::Array use_broadcast; - paddle::framework::Array ins_data; - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs_data; + pten::framework::Array, Arity> configs; + pten::framework::Array use_broadcast; + pten::framework::Array ins_data; + pten::framework::Array<_ptr_ OutT *, NumOuts> outs_data; for (int i = 0; i < NumOuts; ++i) { outs_data[i] = (*outs)[i]->mutable_data(); @@ -444,7 +443,7 @@ void LaunchBroadcastKernelForDifferentVecSize( "The maximum dimension of input tensor is expected to be less than 
" "%d, but recieved %d.\n", merge_dims.dim_size, - paddle::framework::DDim::kMaxRank)); + pten::framework::DDim::kMaxRank)); } } #undef CALL_BROADCAST_FOR_DIM_SIZE @@ -1826,8 +1825,8 @@ void CommonElementwiseBroadcastBackward(const GPUContext &ctx, } VLOG(3) << "CommonElementwiseBroadcastBackward xdims:" - << paddle::framework::make_ddim(x_dims_array) - << " ydim:" << paddle::framework::make_ddim(y_dims_array); + << pten::framework::make_ddim(x_dims_array) + << " ydim:" << pten::framework::make_ddim(y_dims_array); CommonGradBroadcastCUDA(x, y, diff --git a/paddle/pten/kernels/gpu/reduce.h b/paddle/pten/kernels/gpu/reduce.h index e7d1d2d5f4..e247f786cc 100644 --- a/paddle/pten/kernels/gpu/reduce.h +++ b/paddle/pten/kernels/gpu/reduce.h @@ -32,7 +32,6 @@ namespace cub = hipcub; #endif -#include "paddle/fluid/framework/array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" @@ -41,6 +40,7 @@ namespace cub = hipcub; #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/fast_divmod.h" #include "paddle/fluid/string/string_helper.h" +#include "paddle/pten/core/array.h" #include "paddle/pten/api/ext/dispatch.h" #include "paddle/pten/backends/gpu/gpu_context.h" @@ -118,7 +118,7 @@ static inline void CheckReduceRank(int reduce_rank, int rank) { // convert dims from vector to array template -static inline paddle::framework::Array VectorToArray( +static inline pten::framework::Array VectorToArray( const VectorLikeType& vec) { PADDLE_ENFORCE_LE(vec.size(), ElementCount, @@ -128,7 +128,7 @@ static inline paddle::framework::Array VectorToArray( vec.size(), ElementCount)); size_t n = static_cast(vec.size()); - paddle::framework::Array ret; + pten::framework::Array ret; for (size_t i = 0; i < n; ++i) { ret[i] = vec[i]; } @@ -162,7 +162,7 @@ static inline std::vector GetReduceDim(const std::vector& dims, } // namespace details -constexpr int kMaxRank = paddle::framework::DDim::kMaxRank; +constexpr int kMaxRank = pten::framework::DDim::kMaxRank; enum ReduceType { kReduceLastDim = 0x01, // when reduce_dim[0] == x_dim.size() - 1; @@ -202,9 +202,9 @@ struct IndexCalculator { } int dim; - paddle::framework::Array dims; - paddle::framework::Array strides; - paddle::framework::Array divmoders; + pten::framework::Array dims; + pten::framework::Array strides; + pten::framework::Array divmoders; }; template @@ -326,7 +326,7 @@ struct ReduceConfig { const paddle::platform::Place& place, pten::DenseTensor* tmp) { if (should_reduce_again) { - tmp->ResizeAndAllocate(paddle::framework::make_ddim( + tmp->ResizeAndAllocate(pten::framework::make_ddim( {static_cast(left_num * grid.z * grid.y * sizeof(Ty))})); output_data = tmp->mutable_data(); } else { @@ -1029,7 +1029,7 @@ static pten::DenseTensor tmp = pten::DenseTensor( pten::make_intrusive(place), pten::DenseTensorMeta(pten::DataType::UINT8, - paddle::framework::make_ddim( + pten::framework::make_ddim( {static_cast(temp_storage_bytes)}))); auto* temp_storage = tmp.mutable_data(); @@ -1073,7 +1073,7 @@ void TensorReduceFunctorImpl(const pten::DenseTensor& x, // Allocate memory y->mutable_data(); - auto x_dim = paddle::framework::vectorize(x.dims()); + auto x_dim = pten::framework::vectorize(x.dims()); auto config = ReduceConfig(origin_reduce_dims, x_dim); config.Run(); int numel = x.numel(); diff --git a/paddle/pten/kernels/impl/dot_grad_kernel_impl.h b/paddle/pten/kernels/impl/dot_grad_kernel_impl.h index 
index 39cdbad514..557f6fae7b 100644
--- a/paddle/pten/kernels/impl/dot_grad_kernel_impl.h
+++ b/paddle/pten/kernels/impl/dot_grad_kernel_impl.h
@@ -103,7 +103,7 @@ struct DotGradFunction<DeviceContext, T, math::EnableComplex<T>> {
       tensor_dx->mutable_data<T>();
       const auto* data_y = tensor_y->data<T>();
       const DDim& dim = tensor_x->dims();
-      size_t N = static_cast<size_t>(paddle::framework::product(dim));
+      size_t N = static_cast<size_t>(pten::framework::product(dim));
 
       auto step = dim[dim.size() - 1];
@@ -118,7 +118,7 @@ struct DotGradFunction<DeviceContext, T, math::EnableComplex<T>> {
       tensor_dy->mutable_data<T>();
       const auto* data_x = tensor_x->data<T>();
       const DDim& dim = tensor_y->dims();
-      size_t N = static_cast<size_t>(paddle::framework::product(dim));
+      size_t N = static_cast<size_t>(pten::framework::product(dim));
 
       auto step = dim[dim.size() - 1];
diff --git a/paddle/pten/kernels/impl/full_kernel_impl.h b/paddle/pten/kernels/impl/full_kernel_impl.h
index 134a815799..2900e2e83b 100644
--- a/paddle/pten/kernels/impl/full_kernel_impl.h
+++ b/paddle/pten/kernels/impl/full_kernel_impl.h
@@ -36,7 +36,7 @@ void FullKernel(const Context& dev_ctx,
                 const ScalarArray& shape,
                 const Scalar& val,
                 DenseTensor* out) {
-  out->ResizeAndAllocate(paddle::framework::make_ddim(shape.GetData()));
+  out->ResizeAndAllocate(pten::framework::make_ddim(shape.GetData()));
   FullValue<T>(dev_ctx, out, val.to<T>());
 }
diff --git a/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h b/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h
index b1bae78ddc..71fadfae7d 100644
--- a/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h
+++ b/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h
@@ -135,7 +135,7 @@ static DDim RowMatrixFromVector(const DDim& x_dim) {
   if (x_dim.size() > 1) {
     return x_dim;
   }
-  return paddle::framework::make_ddim({1, x_dim[0]});
+  return pten::framework::make_ddim({1, x_dim[0]});
 }
 
 /**
@@ -146,7 +146,7 @@ static DDim ColumnMatrixFromVector(const DDim& y_dim) {
   if (y_dim.size() > 1) {
     return y_dim;
   }
-  return paddle::framework::make_ddim({y_dim[0], 1});
+  return pten::framework::make_ddim({y_dim[0], 1});
 }
 
 /**
diff --git a/paddle/pten/kernels/impl/matmul_kernel_impl.h b/paddle/pten/kernels/impl/matmul_kernel_impl.h
index 5ea9729655..afe6bf71e2 100644
--- a/paddle/pten/kernels/impl/matmul_kernel_impl.h
+++ b/paddle/pten/kernels/impl/matmul_kernel_impl.h
@@ -164,7 +164,7 @@ void MatMulFunction(const Context& dev_ctx,
       std::copy_n(y_dims.cbegin(), y_ndim - 2, out_dims.begin());
       out_dims.back() = y_dims.back();
     }
-    Out->ResizeAndAllocate(paddle::framework::make_ddim(out_dims));
+    Out->ResizeAndAllocate(pten::framework::make_ddim(out_dims));
     Out->mutable_data<T>();
     if (trans_y) {
       const int M = Y.numel() / N;
@@ -242,7 +242,7 @@ void MatMulFunction(const Context& dev_ctx,
     } else {
       std::copy_n(x_dims.cbegin(), x_ndim - 1, out_dims.begin());
     }
-    Out->ResizeAndAllocate(paddle::framework::make_ddim(out_dims));
+    Out->ResizeAndAllocate(pten::framework::make_ddim(out_dims));
     Out->mutable_data<T>();
 
     if (trans_x) {
@@ -330,7 +330,7 @@ void MatMulFunction(const Context& dev_ctx,
   out_broadcast_dims[ndim - 2] = M;
   out_broadcast_dims[ndim - 1] = N;
 
-  Out->ResizeAndAllocate(paddle::framework::make_ddim(out_broadcast_dims));
+  Out->ResizeAndAllocate(pten::framework::make_ddim(out_broadcast_dims));
   Out->mutable_data<T>();
 
   const int batch_dim = ndim - 2;
@@ -493,12 +493,12 @@ void MatmulKernel(const Context& dev_ctx,
                   bool transpose_x,
                   bool transpose_y,
                   DenseTensor* out) {
-  PADDLE_ENFORCE_NE(paddle::framework::product(x.dims()),
+  PADDLE_ENFORCE_NE(pten::framework::product(x.dims()),
                     0,
                     paddle::platform::errors::InvalidArgument(
                         "The Input(X) dims size must not be equal 0,"
                        " but received dims size is
0. ")); - PADDLE_ENFORCE_NE(paddle::framework::product(y.dims()), + PADDLE_ENFORCE_NE(pten::framework::product(y.dims()), 0, paddle::platform::errors::InvalidArgument( "The Input(Y) dims size must not be equal 0," diff --git a/paddle/pten/tests/api/test_cast_api.cc b/paddle/pten/tests/api/test_cast_api.cc index 6608d1ed08..0a3b56e3f1 100644 --- a/paddle/pten/tests/api/test_cast_api.cc +++ b/paddle/pten/tests/api/test_cast_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, cast) { diff --git a/paddle/pten/tests/api/test_conj_api.cc b/paddle/pten/tests/api/test_conj_api.cc index 50d190257a..c17b0f23f4 100644 --- a/paddle/pten/tests/api/test_conj_api.cc +++ b/paddle/pten/tests/api/test_conj_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, conj) { diff --git a/paddle/pten/tests/api/test_dot_api.cc b/paddle/pten/tests/api/test_dot_api.cc index 40e709b960..97616d0cbc 100644 --- a/paddle/pten/tests/api/test_dot_api.cc +++ b/paddle/pten/tests/api/test_dot_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, dot) { diff --git a/paddle/pten/tests/api/test_elementwise_api.cc b/paddle/pten/tests/api/test_elementwise_api.cc index 69af32eb45..17a6ffde9d 100644 --- a/paddle/pten/tests/api/test_elementwise_api.cc +++ b/paddle/pten/tests/api/test_elementwise_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, add) { diff --git a/paddle/pten/tests/api/test_empty_api.cc b/paddle/pten/tests/api/test_empty_api.cc index f4e3f472c7..f38e91b02b 100644 --- a/paddle/pten/tests/api/test_empty_api.cc +++ b/paddle/pten/tests/api/test_empty_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, empty_like) { diff --git a/paddle/pten/tests/api/test_fill_api.cc b/paddle/pten/tests/api/test_fill_api.cc index 0d82376568..7910cc840f 100644 --- a/paddle/pten/tests/api/test_fill_api.cc +++ b/paddle/pten/tests/api/test_fill_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, full_like) { diff --git a/paddle/pten/tests/api/test_flatten_api.cc b/paddle/pten/tests/api/test_flatten_api.cc index 6c082b9653..cf8fa9cb18 100644 --- a/paddle/pten/tests/api/test_flatten_api.cc +++ b/paddle/pten/tests/api/test_flatten_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = 
pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, flatten) { diff --git a/paddle/pten/tests/api/test_matmul_api.cc b/paddle/pten/tests/api/test_matmul_api.cc index 03f686f1c3..08e0e888b9 100644 --- a/paddle/pten/tests/api/test_matmul_api.cc +++ b/paddle/pten/tests/api/test_matmul_api.cc @@ -26,7 +26,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(API, matmul_cpu) { // 1. create tensor diff --git a/paddle/pten/tests/api/test_mean_api.cc b/paddle/pten/tests/api/test_mean_api.cc index 9d90e58101..a7b85cff12 100644 --- a/paddle/pten/tests/api/test_mean_api.cc +++ b/paddle/pten/tests/api/test_mean_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, mean) { diff --git a/paddle/pten/tests/api/test_reshape_api.cc b/paddle/pten/tests/api/test_reshape_api.cc index 59e9e9fab1..bfd1ea8414 100644 --- a/paddle/pten/tests/api/test_reshape_api.cc +++ b/paddle/pten/tests/api/test_reshape_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, reshape) { diff --git a/paddle/pten/tests/api/test_scale_api.cc b/paddle/pten/tests/api/test_scale_api.cc index 5ad5214276..bb5523d26c 100644 --- a/paddle/pten/tests/api/test_scale_api.cc +++ b/paddle/pten/tests/api/test_scale_api.cc @@ -24,7 +24,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; void CheckScaleResult(experimental::Tensor* out) { ASSERT_EQ(out->dims().size(), 2); diff --git a/paddle/pten/tests/api/test_sum_api.cc b/paddle/pten/tests/api/test_sum_api.cc index 5a7c9840e1..c0d5a89eeb 100644 --- a/paddle/pten/tests/api/test_sum_api.cc +++ b/paddle/pten/tests/api/test_sum_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, sum) { diff --git a/paddle/pten/tests/api/test_to_api.cc b/paddle/pten/tests/api/test_to_api.cc index 9aef716029..fa999aace6 100644 --- a/paddle/pten/tests/api/test_to_api.cc +++ b/paddle/pten/tests/api/test_to_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; paddle::experimental::Tensor CreateInputTensor() { const auto alloc = std::make_unique( diff --git a/paddle/pten/tests/kernels/test_cast_dev_api.cc b/paddle/pten/tests/kernels/test_cast_dev_api.cc index 80328d0b24..3b1412a8e5 100644 --- a/paddle/pten/tests/kernels/test_cast_dev_api.cc +++ b/paddle/pten/tests/kernels/test_cast_dev_api.cc @@ -28,7 +28,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, cast) { // 1. 
create tensor diff --git a/paddle/pten/tests/kernels/test_conj_dev_api.cc b/paddle/pten/tests/kernels/test_conj_dev_api.cc index 6f2ea0602b..51066d8ae4 100644 --- a/paddle/pten/tests/kernels/test_conj_dev_api.cc +++ b/paddle/pten/tests/kernels/test_conj_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, conj) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_copy_dev_api.cc b/paddle/pten/tests/kernels/test_copy_dev_api.cc index d690b29d71..4f8bd72771 100644 --- a/paddle/pten/tests/kernels/test_copy_dev_api.cc +++ b/paddle/pten/tests/kernels/test_copy_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(YuanRisheng): This TEST file need to be refactored after 'copy' realized // in 'paddle/api' diff --git a/paddle/pten/tests/kernels/test_creation_dev_api.cc b/paddle/pten/tests/kernels/test_creation_dev_api.cc index b1c23d4a76..1aa21b847f 100644 --- a/paddle/pten/tests/kernels/test_creation_dev_api.cc +++ b/paddle/pten/tests/kernels/test_creation_dev_api.cc @@ -27,7 +27,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, empty) { // 1. create input diff --git a/paddle/pten/tests/kernels/test_dot_dev_api.cc b/paddle/pten/tests/kernels/test_dot_dev_api.cc index 4213240f57..e4978d84c8 100644 --- a/paddle/pten/tests/kernels/test_dot_dev_api.cc +++ b/paddle/pten/tests/kernels/test_dot_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, dot) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc index 23583a8435..0bc16371c0 100644 --- a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc +++ b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, add) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_flatten_dev_api.cc b/paddle/pten/tests/kernels/test_flatten_dev_api.cc index 13fc327b66..78cd6261c3 100644 --- a/paddle/pten/tests/kernels/test_flatten_dev_api.cc +++ b/paddle/pten/tests/kernels/test_flatten_dev_api.cc @@ -36,7 +36,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, flatten) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_matmul_dev_api.cc b/paddle/pten/tests/kernels/test_matmul_dev_api.cc index 118215db50..76f7750319 100644 --- a/paddle/pten/tests/kernels/test_matmul_dev_api.cc +++ b/paddle/pten/tests/kernels/test_matmul_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, dot) { // 1. 
create tensor diff --git a/paddle/pten/tests/kernels/test_mean_dev_api.cc b/paddle/pten/tests/kernels/test_mean_dev_api.cc index a8860540fd..07ec30afad 100644 --- a/paddle/pten/tests/kernels/test_mean_dev_api.cc +++ b/paddle/pten/tests/kernels/test_mean_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, mean) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_reshape_dev_api.cc b/paddle/pten/tests/kernels/test_reshape_dev_api.cc index 52038593d7..dc90043305 100644 --- a/paddle/pten/tests/kernels/test_reshape_dev_api.cc +++ b/paddle/pten/tests/kernels/test_reshape_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(DEV_API, reshape) { diff --git a/paddle/pten/tests/kernels/test_scale_dev_api.cc b/paddle/pten/tests/kernels/test_scale_dev_api.cc index 1c0be6c06a..106835a204 100644 --- a/paddle/pten/tests/kernels/test_scale_dev_api.cc +++ b/paddle/pten/tests/kernels/test_scale_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, scale) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_sum_dev_api.cc b/paddle/pten/tests/kernels/test_sum_dev_api.cc index 2b11ba9595..41d694a025 100644 --- a/paddle/pten/tests/kernels/test_sum_dev_api.cc +++ b/paddle/pten/tests/kernels/test_sum_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, sum) { // 1. create tensor --
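For downstream readers, the practical effect of this migration is a one-line spelling change at call sites; a minimal post-migration sketch (illustrative only, mirroring the test changes above):

    #include "paddle/pten/core/ddim.h"

    using DDim = pten::framework::DDim;  // was paddle::framework::DDim

    DDim dims = pten::framework::make_ddim({2, 3, 4});
    int64_t numel = pten::framework::product(dims);  // 24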