diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 902943d14ff9d343a3ae47a09f0a5835f20d0414..83e5c1c17925e2973891ec949e27469c569b7b42 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -55,14 +55,6 @@ proto_library(trainer_desc_proto SRCS trainer_desc.proto DEPS framework_proto
 cc_library(string_array SRCS string_array.cc DEPS utf8proc)
 
-cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce)
-cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
-if(WITH_GPU)
-  nv_test(dim_test SRCS dim_test.cu DEPS ddim)
-elseif(WITH_ROCM)
-  hip_test(dim_test SRCS dim_test.cu DEPS ddim)
-endif()
-cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc)
 cc_library(data_type SRCS data_type.cc DEPS framework_proto ddim device_context)
 cc_test(data_type_test SRCS data_type_test.cc DEPS data_type place tensor)
 if(WITH_GPU)
diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h
index 565e0b430dfdc137811e7a2b3f4280162c2d35fb..d150cca9d4c679d773d8b7b4019d8bbfd39ec3ec 100644
--- a/paddle/fluid/framework/ddim.h
+++ b/paddle/fluid/framework/ddim.h
@@ -14,237 +14,13 @@ limitations under the License. */
 
 #pragma once
 
-#include <initializer_list>
-#include <stdexcept>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/dim.h"
+#include "paddle/pten/core/ddim.h"
 
 namespace paddle {
 namespace framework {
 
-#define PADDLE_VISIT_DDIM_BASE(rank, callback) \
-  case (rank): {                               \
-    constexpr auto kRank = (rank);             \
-    return (callback);                         \
-  }
-
-#define PADDLE_VISIT_DDIM(rank, callback)                                  \
-  switch (rank) {                                                          \
-    PADDLE_VISIT_DDIM_BASE(0, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(1, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(2, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(3, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(4, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(5, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(6, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(7, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(8, callback);                                   \
-    PADDLE_VISIT_DDIM_BASE(9, callback);                                   \
-    default:                                                               \
-      PADDLE_THROW(platform::errors::Unimplemented(                        \
-          "Invalid dimension to be accessed. Now only supports access to " \
-          "dimension 0 to 9, but received dimension is %d.",               \
-          rank));                                                          \
-  }
-
-template <typename T1, typename T2>
-inline void dynamic_dim_assign(const T1* in, T2* out, int n) {
-  PADDLE_VISIT_DDIM(n, (static_dim_assign<kRank, T1, T2>(in, out)));
-}
-
-/**
- * \brief A dynamically sized dimension.
- *
- * The number of dimensions must be between [1, 9].
- */
-class DDim {
- public:
-  constexpr static int kMaxRank = 9;
-
-  DDim() : rank_(1) { dim_[0] = 0; }
-
-  DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); }
-
-  DDim(const int* d, int n) : rank_(n) {
-    dynamic_dim_assign(d, dim_.GetMutable(), n);
-  }
-
-  DDim(const int64_t* d, int n) : rank_(n) {
-    dynamic_dim_assign(d, dim_.GetMutable(), n);
-  }
-
-  template <int D>
-  /*implicit*/ DDim(const Dim<D>& in) : rank_(D) {  // NOLINT
-    UnsafeCast<D>() = in;
-  }
-
-  /*implicit*/ DDim(std::initializer_list<int64_t> init_list)
-      : DDim(init_list.begin(), init_list.size()) {}
-
-  inline DDim& operator=(const DDim& ddim) { return CopyFrom(ddim); }
-
-  template <int D>
-  inline DDim& operator=(const Dim<D>& dim) {
-    rank_ = D;
-    UnsafeCast<D>() = dim;
-    return *this;
-  }
-
-  inline int64_t& operator[](int idx) { return dim_[idx]; }
-
-  inline int64_t operator[](int idx) const { return dim_[idx]; }
-
-  int64_t& at(int idx) {
-    PADDLE_ENFORCE_GE(idx, 0,
-                      platform::errors::InvalidArgument(
-                          "Invalid DDim index to be accessed. The valid index "
-                          "is between 0 and %d, but received index is %d.",
-                          rank_, idx));
-    PADDLE_ENFORCE_LT(idx, rank_,
-                      platform::errors::InvalidArgument(
-                          "Invalid DDim index to be accessed. The valid index "
-                          "is between 0 and %d, but received index is %d.",
-                          rank_, idx));
-    return dim_[idx];
-  }
-
-  int64_t at(int idx) const {
-    PADDLE_ENFORCE_GE(idx, 0,
-                      platform::errors::InvalidArgument(
-                          "Invalid DDim index to be accessed. The valid index "
-                          "is between 0 and %d, but received index is %d.",
-                          rank_, idx));
-    PADDLE_ENFORCE_LT(idx, rank_,
-                      platform::errors::InvalidArgument(
-                          "Invalid DDim index to be accessed. The valid index "
-                          "is between 0 and %d, but received index is %d.",
-                          rank_, idx));
-    return dim_[idx];
-  }
-
-  template <typename Visitor>
-  typename std::result_of<Visitor(Dim<0>&)>::type apply_visitor(
-      Visitor&& visitor) {
-    PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast<kRank>()));
-  }
-
-  template <typename Visitor>
-  typename std::result_of<Visitor(const Dim<0>&)>::type apply_visitor(
-      Visitor&& visitor) const {
-    PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast<kRank>()));
-  }
-
-  bool operator==(const DDim& d) const;
-
-  bool operator!=(const DDim& d) const;
-
-  inline const int64_t* Get() const { return dim_.Get(); }
-
-  inline int64_t* GetMutable() { return dim_.GetMutable(); }
-
-  inline int size() const { return rank_; }
-
-  std::string to_str() const;
-
-  DDim reshape(const std::vector<int>& shape) const;
-
-  DDim transpose(const std::vector<int>& axis) const;
-
- private:
-  template <int D>
-  inline Dim<D>& UnsafeCast() {
-    static_assert(D >= 0 && D <= kMaxRank, "Invalid rank");
-    auto* p = static_cast<void*>(&dim_);
-    return *reinterpret_cast<Dim<D>*>(p);
-  }
-
-  template <int D>
-  inline const Dim<D>& UnsafeCast() const {
-    static_assert(D >= 0 && D <= kMaxRank, "Invalid rank");
-    auto* p = static_cast<const void*>(&dim_);
-    return *reinterpret_cast<const Dim<D>*>(p);
-  }
-
-  inline DDim& CopyFrom(const DDim& ddim) {
-    PADDLE_VISIT_DDIM(ddim.rank_, (*this = ddim.UnsafeCast<kRank>()));
-  }
-
-  friend DDim stride(const DDim& ddim);
-  friend DDim stride_numel(const DDim& ddim);
-
- private:
-  Dim<kMaxRank> dim_;
-  int rank_;
-};
-
-#undef PADDLE_VISIT_DDIM_BASE
-#undef PADDLE_VISIT_DDIM
-
-/**
- * \brief Make a DDim from std::vector<int64_t>
- *
- * \param dims An vector of ints. Must be sized between [1, 9]
- */
-DDim make_ddim(const std::vector<int64_t>& dims);
-
-DDim make_ddim(const std::vector<int>& dims);
-
-/**
- * \brief Make a DDim from an initializer list
- *
- * \param dims An initializer list of ints. Must be sized between [1, 9]
- *
- */
-DDim make_ddim(std::initializer_list<int64_t> dims);
-
-template <typename T = int64_t>
-std::vector<T> vectorize(const DDim& ddim) {
-  std::vector<T> result(DDim::kMaxRank);
-  dynamic_dim_assign(ddim.Get(), result.data(), ddim.size());
-  result.resize(ddim.size());
-  return result;
-}
-
-int64_t product(const DDim& ddim);
-
-bool contain_unknown_dim(const DDim& ddim);
-
-/**
- * \brief Slice a ddim
- *
- * Slice dim with [begin, end).
- * e.g.  DDim d = make_ddim({1,2,3,4,5});
- *       slice_ddim(d, 1, 3); ====> {2,3}
- */
-DDim slice_ddim(const DDim& dim, int begin, int end);
-
-/**
- * \brief What is the length of this dimension?
- *
- * \param Dynamic dimension to inspect
- */
-
-int arity(const DDim& ddim);
-
-std::ostream& operator<<(std::ostream&, const DDim&);
-
-/**
-* \brief Flatten dim to 3d
-* e.g., DDim d = mak_ddim({1, 2, 3, 4, 5, 6})
-*       flatten_to_3d(d, 2, 4); ===> {1*2, 3*4, 5*6} ===> {2, 12, 30}
-*/
-DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims);
-
-// Reshape a tensor to a matrix. The matrix's first dimension(column length)
-// will be the product of tensor's first `num_col_dims` dimensions.
-DDim flatten_to_2d(const DDim& src, int num_col_dims);
-
-DDim flatten_to_1d(const DDim& src);
-
-DDim stride(const DDim& ddim);
+using DDim = pten::framework::DDim;
+using namespace pten::framework;  // NOLINT
 
-DDim stride_numel(const DDim& ddim);
 }  // namespace framework
 }  // namespace paddle
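Note: after this change `paddle/fluid/framework/ddim.h` is just a compatibility shim. A minimal sketch of a fluid-style call site that should keep compiling unchanged, since `paddle::framework::DDim` is now an alias and `make_ddim`/`product` are re-exported by the using-directive (`ddim_alias_demo` is illustrative, not part of the patch):

```cpp
#include "paddle/fluid/framework/ddim.h"  // now only re-exports pten::framework

void ddim_alias_demo() {
  // Resolves to pten::framework::make_ddim through the using-directive.
  paddle::framework::DDim d = paddle::framework::make_ddim({2, 3, 4});
  int64_t n = paddle::framework::product(d);  // 2 * 3 * 4 = 24
  (void)n;
}
```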
diff --git a/paddle/fluid/framework/ddim_test.cc b/paddle/fluid/framework/ddim_test.cc
deleted file mode 100644
index e89f77ae496c499b7408d4e0836c2abe5cff9660..0000000000000000000000000000000000000000
--- a/paddle/fluid/framework/ddim_test.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-#include <sstream>
-
-#include "gtest/gtest.h"
-#include "paddle/fluid/framework/ddim.h"
-
-TEST(DDim, Equality) {
-  // construct a DDim from an initialization list
-  paddle::framework::DDim ddim = paddle::framework::make_ddim({9, 1, 5});
-  EXPECT_EQ(ddim[0], 9);
-  EXPECT_EQ(ddim[1], 1);
-  EXPECT_EQ(ddim[2], 5);
-
-  // construct a DDim from a vector
-  std::vector<int64_t> vec({9, 1, 5});
-  paddle::framework::DDim vddim = paddle::framework::make_ddim(vec);
-  EXPECT_EQ(ddim[0], 9);
-  EXPECT_EQ(ddim[1], 1);
-  EXPECT_EQ(ddim[2], 5);
-
-  // mutate a DDim
-  ddim[1] = 2;
-  EXPECT_EQ(ddim[1], 2);
-  ddim[0] = 6;
-  EXPECT_EQ(ddim[0], 6);
-
-  // vectorize a DDim
-  std::vector<int64_t> res_vec = paddle::framework::vectorize(vddim);
-  EXPECT_EQ(res_vec[0], 9);
-  EXPECT_EQ(res_vec[1], 1);
-  EXPECT_EQ(res_vec[2], 5);
-  paddle::framework::Dim<3> d(3, 2, 1);
-  res_vec = paddle::framework::vectorize(paddle::framework::DDim(d));
-  EXPECT_EQ(res_vec[0], 3);
-  EXPECT_EQ(res_vec[1], 2);
-  EXPECT_EQ(res_vec[2], 1);
-
-  // arity of a DDim
-  EXPECT_EQ(paddle::framework::arity(ddim), 3);
-  EXPECT_EQ(ddim.size(), 3);
-
-  // product of a DDim
-  EXPECT_EQ(paddle::framework::product(vddim), 45);
-  EXPECT_EQ(
-      paddle::framework::product(paddle::framework::make_ddim({3, 2, 5, 3})),
-      90);
-
-  // slice a DDim
-  paddle::framework::DDim ddim2 =
-      paddle::framework::make_ddim({1, 2, 3, 4, 5, 6});
-  paddle::framework::DDim ss = paddle::framework::slice_ddim(ddim2, 2, 5);
-  EXPECT_EQ(arity(ss), 3);
-  EXPECT_EQ(ss[0], 3);
-  EXPECT_EQ(ss[1], 4);
-  EXPECT_EQ(ss[2], 5);
-  paddle::framework::DDim ss2 = paddle::framework::slice_ddim(ddim2, 0, 6);
-  EXPECT_EQ(arity(ss2), 6);
-  EXPECT_EQ(ss2[0], 1);
-  EXPECT_EQ(ss2[1], 2);
-  EXPECT_EQ(ss2[2], 3);
-  EXPECT_EQ(ss2[3], 4);
-  EXPECT_EQ(ss2[4], 5);
-  EXPECT_EQ(ss2[5], 6);
-}
-
-TEST(DDim, Print) {
-  // print a DDim
-  std::stringstream ss;
-  paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 3, 4});
-  ss << ddim;
-  EXPECT_EQ("2, 3, 4", ss.str());
-}
diff --git a/paddle/fluid/framework/dim.h b/paddle/fluid/framework/dim.h
index 66214b265fdf9078aeda4efa37c7ad1f2bbef62b..6abae4e73183295257c0a32c7069cf257507e794 100644
--- a/paddle/fluid/framework/dim.h
+++ b/paddle/fluid/framework/dim.h
@@ -12,89 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 #pragma once
-
-#include <iostream>
-#include <sstream>
-#include <stdexcept>
-#include <string>
-#include <type_traits>
-
-#include "paddle/fluid/framework/array.h"
-#include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/dim.h"
 
 namespace paddle {
 namespace framework {
-
-// Statically sized, statically indexed dimension
-template <int D>
-class Dim : public Array<int64_t, D> {
- public:
-  static_assert(D >= 0, "D must be not less than 0");
-
-  static constexpr int kRank = D;
-  using BaseClass = Array<int64_t, D>;
-
-  inline Dim(int64_t head, const Dim<D - 1>& tail) {
-    (*this)[0] = head;
-    new (this->GetMutable() + 1) Dim<D - 1>(tail);
-  }
-
-  template <typename... Args>
-  HOSTDEVICE explicit Dim(int64_t head, Args... args)
-      : BaseClass(head, args...) {}
-
-  /** Construct a Dim with each dimension set to the given index */
-  HOSTDEVICE explicit Dim(int64_t idx) { this->Fill(idx); }
-
-  HOSTDEVICE Dim() = default;
-
-  HOST std::string to_string() const;
-};
-
-// Product of a Dim
-template <int D>
-HOSTDEVICE inline int64_t product(const Dim<D>& a) {
-  return UnrollProduct<D>::Run(a.Get());
-}
-
-/**
- * Helper function to create a Dim
- *
- * \param idxes The type of Dim constructed depends on the number of params
- *
- */
-
-template <typename... Args>
-HOSTDEVICE inline Dim<sizeof...(Args)> make_dim(Args... idxes) {
-  return Dim<sizeof...(Args)>(idxes...);
-}
-
-// Allows us to output a Dim
-template <int D>
-inline std::ostream& operator<<(std::ostream& os, const Dim<D>& d) {
-  os << d[0];
-  for (int i = 1; i < D; ++i) {
-    os << ", " << d[i];
-  }
-  return os;
-}
-
-inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) {
-  return os;
-}
-
-template <int D>
-HOST std::string Dim<D>::to_string() const {
-  std::stringstream stream;
-  stream << *this;
-  return stream.str();
-}
-
-template <int D, typename T1, typename T2>
-inline void static_dim_assign(const T1* in, T2* out) {
-  UnrollAssign<D>::Run(in, out);
-}
+template <int D>
+using Dim = pten::framework::Dim<D>;
+using namespace pten::framework;  // NOLINT
 
 }  // namespace framework
 }  // namespace paddle
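The same compatibility pattern, this time via an alias template. A minimal sketch, assuming the relocated `pten/core/dim.h` keeps fluid's `product()` helper as the deleted block above suggests (`dim_alias_demo` is illustrative only):

```cpp
#include "paddle/fluid/framework/dim.h"  // now an alias onto pten::framework

void dim_alias_demo() {
  paddle::framework::Dim<3> d(3, 2, 1);       // alias for pten::framework::Dim<3>
  int64_t p = paddle::framework::product(d);  // 3 * 2 * 1 = 6, via using-directive
  (void)p;
}
```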
diff --git a/paddle/fluid/operators/amp/check_finite_and_unscale_op.h b/paddle/fluid/operators/amp/check_finite_and_unscale_op.h
index 29b96c4a6704a877b479a35fb39bb213ab29a10d..49ca2c3862a5e1ee712c9f4c606bbb642d463e8d 100644
--- a/paddle/fluid/operators/amp/check_finite_and_unscale_op.h
+++ b/paddle/fluid/operators/amp/check_finite_and_unscale_op.h
@@ -18,7 +18,7 @@ limitations under the License. */
 #include <vector>
 
 #include "paddle/fluid/operators/isfinite_op.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/amp/update_loss_scaling_op.h b/paddle/fluid/operators/amp/update_loss_scaling_op.h
index decc3c3b924c45143c7f7e117fa12e0bf1c05444..2c953d4eee373667fbf0564d35e4f2219667da44 100644
--- a/paddle/fluid/operators/amp/update_loss_scaling_op.h
+++ b/paddle/fluid/operators/amp/update_loss_scaling_op.h
@@ -24,7 +24,7 @@
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/bce_loss_op.cu b/paddle/fluid/operators/bce_loss_op.cu
index d493dad132992adc01b8203990e6c1c557e37f97..6595d6deccd9aad2b24ec3a13a2008989b0af9ee 100644
--- a/paddle/fluid/operators/bce_loss_op.cu
+++ b/paddle/fluid/operators/bce_loss_op.cu
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/bernoulli_op.h b/paddle/fluid/operators/bernoulli_op.h
index 40f285d11f194057d950f45798bea07439398ab0..da66742e08fd924567ec42e98a6fff19b212e277 100644
--- a/paddle/fluid/operators/bernoulli_op.h
+++ b/paddle/fluid/operators/bernoulli_op.h
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
 
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/bilateral_slice_op.h b/paddle/fluid/operators/bilateral_slice_op.h
index 0903fe4c71d3d7123c6f340d9e83d526c72dfccb..3ef13c421cdfb0844bf0fa3430647749a5e5c2fc 100644
--- a/paddle/fluid/operators/bilateral_slice_op.h
+++ b/paddle/fluid/operators/bilateral_slice_op.h
@@ -13,7 +13,7 @@
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/bincount_op.cu b/paddle/fluid/operators/bincount_op.cu
index cf189193d1c11ab6bb6c9fc3571f7ba6a8ea9081..5964b9e345e93acbdfa7a405f3c64a71bf41bd78 100644
--- a/paddle/fluid/operators/bincount_op.cu
+++ b/paddle/fluid/operators/bincount_op.cu
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/bincount_op.h"
 #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/deformable_conv_func.h b/paddle/fluid/operators/deformable_conv_func.h
index ba1c5044302232c45f4d53236290712c33c3a352..99d1d7c4776c33f1350bccec0fe7ae99df1960ec 100644
--- a/paddle/fluid/operators/deformable_conv_func.h
+++ b/paddle/fluid/operators/deformable_conv_func.h
@@ -24,7 +24,7 @@
 #pragma once
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 template <typename T>
 HOSTDEVICE T DmcnGetGradientWeight(T argmax_h, T argmax_w, const int h,
diff --git a/paddle/fluid/operators/dequantize_log_op.cu b/paddle/fluid/operators/dequantize_log_op.cu
index 39f4fdb71b69dd111cdde673d8117c8fad58dc1b..821b87bf0595a6b7d20e7743327343bd77c77567 100644
--- a/paddle/fluid/operators/dequantize_log_op.cu
+++ b/paddle/fluid/operators/dequantize_log_op.cu
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/dequantize_log_op.h"
 #include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/detection/box_clip_op.cu b/paddle/fluid/operators/detection/box_clip_op.cu
index 17013efcc98b7f66cd6f5ddfe352281cf17ad4fb..53727d9d08747d925aa6a854978604fa666aa26b 100644
--- a/paddle/fluid/operators/detection/box_clip_op.cu
+++ b/paddle/fluid/operators/detection/box_clip_op.cu
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/detection/box_clip_op.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
index 10c402e5a4078ad651543d15118c584fd2e72eff..7102c4cffe21ada7566f9b6cb5d8459f55265f96 100644
--- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
+++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h"
 #include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/detection/yolo_box_op.h b/paddle/fluid/operators/detection/yolo_box_op.h
index e06c81052a0f42c9db4d96e49d2708e64e4f3137..31a67ecc266352be33db013bbf1785ba98c0756d 100644
--- a/paddle/fluid/operators/detection/yolo_box_op.h
+++ b/paddle/fluid/operators/detection/yolo_box_op.h
@@ -14,7 +14,7 @@
 #include <algorithm>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/distribution_helper.h b/paddle/fluid/operators/distribution_helper.h
index 8bb963979e5a71f7f3a46fbdcc0614582fb43746..a13ae570906871b33e841b5bcb449ead9351c637 100644
--- a/paddle/fluid/operators/distribution_helper.h
+++ b/paddle/fluid/operators/distribution_helper.h
@@ -26,7 +26,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/for_range.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 #if !defined(_WIN32)
 #define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
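All of these swaps only move the header that defines the portability macros; no behavior changes. A minimal sketch of what the relocated header provides, assuming it keeps the usual definition (the real macros live in `paddle/pten/core/hostdevice.h`; the guard below mirrors the common pattern rather than quoting the file):

```cpp
// Sketch of the hostdevice.h contract: functors marked HOSTDEVICE compile for
// both host and device when built with NVCC/HIPCC, and as plain C++ otherwise.
#if (defined(__CUDACC__) || defined(__HIPCC__))
#define HOSTDEVICE __host__ __device__
#else
#define HOSTDEVICE
#endif

template <typename T>
struct SquareFunctor {
  HOSTDEVICE T operator()(const T& x) const { return x * x; }
};
```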
diff --git a/paddle/fluid/operators/elementwise/elementwise_functor.h b/paddle/fluid/operators/elementwise/elementwise_functor.h
index 8a6cadc2413dc7c35455e6f118e3c034073e32f2..daca105ce46bb266ff8e53d93286415804a2594a 100644
--- a/paddle/fluid/operators/elementwise/elementwise_functor.h
+++ b/paddle/fluid/operators/elementwise/elementwise_functor.h
@@ -14,8 +14,8 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/framework/array.h"
 #include "paddle/fluid/platform/complex.h"
+#include "paddle/pten/core/array.h"
 #include "paddle/pten/kernels/funcs/elementwise_functor.h"
 
 namespace paddle {
@@ -92,12 +92,12 @@ using Complex = paddle::platform::complex<T>;
 
 template <typename InT, typename OutT>
 struct DivGradXYFunctor {
-  inline HOSTDEVICE paddle::framework::Array<OutT, 2> operator()(const InT a,
-                                                                 const InT b,
-                                                                 const InT c) {
+  inline HOSTDEVICE pten::framework::Array<OutT, 2> operator()(const InT a,
+                                                               const InT b,
+                                                               const InT c) {
     // dx = dout / y
     // dy = - dout * out / y
-    paddle::framework::Array<OutT, 2> outs;
+    pten::framework::Array<OutT, 2> outs;
     outs[0] = a / c;
     outs[1] = -a * b / c;
     return outs;
@@ -106,9 +106,9 @@ struct DivGradXYFunctor {
 
 template <typename InT, typename OutT>
 struct DivGradXYFunctor<Complex<InT>, Complex<OutT>> {
-  inline HOSTDEVICE paddle::framework::Array<Complex<OutT>, 2> operator()(
+  inline HOSTDEVICE pten::framework::Array<Complex<OutT>, 2> operator()(
       const Complex<InT> a, const Complex<InT> b, const Complex<InT> c) {
-    paddle::framework::Array<Complex<OutT>, 2> outs;
+    pten::framework::Array<Complex<OutT>, 2> outs;
     Complex<InT> c_conj(c.real, -c.imag);
     Complex<InT> out_div_c_conj((b / c).real, -(b / c).imag);
     outs[0] = a / c_conj;
@@ -247,9 +247,9 @@ struct MinGradYFunctor {
 
 template <typename InT, typename OutT>
 struct MinGradXYFunctor {
-  inline HOSTDEVICE paddle::framework::Array<OutT, 2> operator()(
+  inline HOSTDEVICE pten::framework::Array<OutT, 2> operator()(
       const InT& x, const InT& y, const InT& dout) {
-    paddle::framework::Array<OutT, 2> outs;
+    pten::framework::Array<OutT, 2> outs;
     // dx = dout * (x < y)
     outs[0] = static_cast<OutT>(dout * static_cast<InT>(x < y));
     // dy = dout * (x >= y)
@@ -273,10 +273,10 @@ struct MulGradFunctor<Complex<T>> {
 
 template <typename InT, typename OutT>
 struct MulGradXYFunctor {
-  inline HOSTDEVICE paddle::framework::Array<OutT, 2> operator()(const InT a,
-                                                                 const InT b,
-                                                                 const InT c) {
-    paddle::framework::Array<OutT, 2> outs;
+  inline HOSTDEVICE pten::framework::Array<OutT, 2> operator()(const InT a,
+                                                               const InT b,
+                                                               const InT c) {
+    pten::framework::Array<OutT, 2> outs;
     // dx = dout * y
     outs[0] = a * b;
     // dy = dout * x
@@ -287,9 +287,9 @@ struct MulGradXYFunctor {
 
 template <typename InT, typename OutT>
 struct MulGradXYFunctor<Complex<InT>, Complex<OutT>> {
-  inline HOSTDEVICE paddle::framework::Array<Complex<OutT>, 2> operator()(
+  inline HOSTDEVICE pten::framework::Array<Complex<OutT>, 2> operator()(
       const Complex<InT> a, const Complex<InT> b, const Complex<InT> c) {
-    paddle::framework::Array<Complex<OutT>, 2> outs;
+    pten::framework::Array<Complex<OutT>, 2> outs;
     // dx = dout * y
     Complex<InT> b_conj(b.real, -b.imag);
     outs[0] = a * b_conj;
@@ -316,9 +316,9 @@ struct MaxGradYFunctor {
 
 template <typename InT, typename OutT>
 struct MaxGradXYFunctor {
-  inline HOSTDEVICE paddle::framework::Array<OutT, 2> operator()(
+  inline HOSTDEVICE pten::framework::Array<OutT, 2> operator()(
      const InT& x, const InT& y, const InT& dout) {
-    paddle::framework::Array<OutT, 2> outs;
+    pten::framework::Array<OutT, 2> outs;
     // dx = dout * (x > y)
     outs[0] = static_cast<OutT>(dout * static_cast<InT>(x > y));
     // dy = dout * (x <= y)
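The pattern these functors share: returning a fixed-size `pten::framework::Array` lets one fused kernel produce both `dx` and `dy`. A minimal sketch in the same style (the `AddGradXYFunctor` name is hypothetical, for illustration only):

```cpp
#include "paddle/pten/core/array.h"
#include "paddle/pten/core/hostdevice.h"

template <typename T>
struct AddGradXYFunctor {
  inline HOSTDEVICE pten::framework::Array<T, 2> operator()(const T dout) {
    pten::framework::Array<T, 2> outs;
    outs[0] = dout;  // dx = dout
    outs[1] = dout;  // dy = dout
    return outs;
  }
};
```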
diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h
index 21e7079ff62334a1e90e106ebd4864809aea4b2a..c31139611e84c2ad5f84dea0ac25b4c1e58d4503 100644
--- a/paddle/fluid/operators/fake_quantize_op.h
+++ b/paddle/fluid/operators/fake_quantize_op.h
@@ -20,8 +20,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/operators/math/blas.h"
-#include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/transform.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h
index da386052c7dc01f405f5030922f3801bd998ce62..a595e5078b21d3422bc6bb0b1658357c47656e72 100644
--- a/paddle/fluid/operators/grid_sampler_op.h
+++ b/paddle/fluid/operators/grid_sampler_op.h
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/gather.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/histogram_op.cu b/paddle/fluid/operators/histogram_op.cu
index 2bf259f7d7a7a348a3ef4e1a0c07975da41f0c83..a34f4b8a22e57609642003b626a1f041bb924a59 100644
--- a/paddle/fluid/operators/histogram_op.cu
+++ b/paddle/fluid/operators/histogram_op.cu
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/histogram_op.h"
 #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h
index 93cfba196468449d1961c769afa5fe33090bdae7..fbfed71e1ecd460b7d7e7c07c78021e98f117fb9 100644
--- a/paddle/fluid/operators/huber_loss_op.h
+++ b/paddle/fluid/operators/huber_loss_op.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/interpolate_op.h b/paddle/fluid/operators/interpolate_op.h
index baa292319d36e4b9f0cf64bba70da553d9992b0e..0c0dde6bd4536328d8facbffa6f59e2c1a7b899d 100644
--- a/paddle/fluid/operators/interpolate_op.h
+++ b/paddle/fluid/operators/interpolate_op.h
@@ -15,7 +15,7 @@
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/interpolate_v2_op.h b/paddle/fluid/operators/interpolate_v2_op.h
index a5afb18b3ff6f4d52ee6c7de550807843d586620..4d6189b57bf1cdacaa4457ebd8e13d158b04fa41 100644
--- a/paddle/fluid/operators/interpolate_v2_op.h
+++ b/paddle/fluid/operators/interpolate_v2_op.h
@@ -15,7 +15,7 @@
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/kernel_primitives/datamover_primitives.h b/paddle/fluid/operators/kernel_primitives/datamover_primitives.h
index ce45ed0301e92e4e67237ceda3a2a5e03e8b0792..45697073cbf85b436a4db33b0a2d49d8b805fd63 100644
--- a/paddle/fluid/operators/kernel_primitives/datamover_primitives.h
+++ b/paddle/fluid/operators/kernel_primitives/datamover_primitives.h
@@ -20,6 +20,7 @@
 #ifdef PADDLE_WITH_HIP
 #include <hip/hip_fp16.h>
 #endif
+#include "paddle/pten/core/ddim.h"
 
 namespace paddle {
 namespace operators {
@@ -85,7 +86,7 @@ struct FastDivMod {
 template <int kDims>
 struct BroadcastConfig {
   FastDivMod divmoders[kDims];
-  uint32_t strides[framework::DDim::kMaxRank];
+  uint32_t strides[pten::framework::DDim::kMaxRank];
   HOSTDEVICE BroadcastConfig() {}
 
   HOSTDEVICE BroadcastConfig(const std::vector<int64_t>& out_dims,
diff --git a/paddle/fluid/operators/kldiv_loss_op.h b/paddle/fluid/operators/kldiv_loss_op.h
index 0bc53d7dd7b3b1f3cb8545003efadecb252cf74c..40199677fe9a33012f436c1f785b2ed0995b1107 100644
--- a/paddle/fluid/operators/kldiv_loss_op.h
+++ b/paddle/fluid/operators/kldiv_loss_op.h
@@ -13,7 +13,7 @@
 #include <string>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu
index 3949a066e08680ac889d18439f0554e8103bcd7b..b758efb065209496922cededa90729856a40a6b0 100644
--- a/paddle/fluid/operators/lstm_unit_op.cu
+++ b/paddle/fluid/operators/lstm_unit_op.cu
@@ -19,7 +19,7 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.c
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/cross_entropy_op.h"
 #include "paddle/fluid/operators/lstm_unit_op.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math.h b/paddle/fluid/operators/math.h
index 3b28928a52892db865523c71ea72b234bd1a5edc..f5ce5af70bd7a87dd27b4d73223b0369c5fa48ca 100644
--- a/paddle/fluid/operators/math.h
+++ b/paddle/fluid/operators/math.h
@@ -15,7 +15,7 @@
 #pragma once
 
 #include "paddle/fluid/platform/float16.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 #include "math.h"  // NOLINT
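Why `datamover_primitives.h` now needs `paddle/pten/core/ddim.h` directly: the strides buffer is sized by the compile-time constant `DDim::kMaxRank` (9), so the full class definition must be visible. A small sketch of that dependency (`ExampleConfig` is a hypothetical stand-in for `BroadcastConfig`):

```cpp
#include <cstdint>

#include "paddle/pten/core/ddim.h"

struct ExampleConfig {
  // Room for any rank up to 9, per pten::framework::DDim::kMaxRank.
  uint32_t strides[pten::framework::DDim::kMaxRank];
};
```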
diff --git a/paddle/fluid/operators/math/algorithm.h b/paddle/fluid/operators/math/algorithm.h
index 346c693a22d85287c45b2b766f8126f677d7b2ad..cbe1a03d90d850ad5de379041edf844262c7ac79 100644
--- a/paddle/fluid/operators/math/algorithm.h
+++ b/paddle/fluid/operators/math/algorithm.h
@@ -18,7 +18,7 @@
 #include <cstdint>  // for int64_t
 #include <numeric>
 
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/complex_functors.h b/paddle/fluid/operators/math/complex_functors.h
index 3214adb095376916facd03df6aafef651283e16a..48f16b87cbd66c6a39c74d1dbaab2349193f04ae 100644
--- a/paddle/fluid/operators/math/complex_functors.h
+++ b/paddle/fluid/operators/math/complex_functors.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <type_traits>
 
 #include "paddle/fluid/platform/complex.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/cos_sim_functor.h b/paddle/fluid/operators/math/cos_sim_functor.h
index 9a24bfc3312665be296f2a6c89da27758d43550e..61827af950bd59848646e2841c97e31e143085e1 100644
--- a/paddle/fluid/operators/math/cos_sim_functor.h
+++ b/paddle/fluid/operators/math/cos_sim_functor.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <math.h>
 
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h
index db19818951d7c9f7e55f8acf2f2de7e3e3819694..e7ac1760d3b9ca77700306451e5ba6d69c52b0fa 100644
--- a/paddle/fluid/operators/math/cross_entropy.h
+++ b/paddle/fluid/operators/math/cross_entropy.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <limits>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h
index f88b4a6e41cf9f25e75358514e78a36717253004..89a1efe133387b937d6192b50bb7616d6a16b286 100644
--- a/paddle/fluid/operators/math/depthwise_conv.h
+++ b/paddle/fluid/operators/math/depthwise_conv.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h
index 38bd1a3dadb6317580829b0ddc964442a5b25013..def25a680cb95f71bec87999a27f4bd3612c96a2 100644
--- a/paddle/fluid/operators/math/detail/activation_functions.h
+++ b/paddle/fluid/operators/math/detail/activation_functions.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <math.h>
 #include <string>
 #include "paddle/fluid/platform/cpu_info.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/detail/gru_kernel.h b/paddle/fluid/operators/math/detail/gru_kernel.h
index d9be8e80658fa24458be45a5b8208fce5741b786..603f5f3426f0dbde375c7b94192deb400c8d7e7c 100644
--- a/paddle/fluid/operators/math/detail/gru_kernel.h
+++ b/paddle/fluid/operators/math/detail/gru_kernel.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include <type_traits>
 #include "paddle/fluid/operators/math/detail/activation_functions.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 // TODO(guosheng): refine code style in gru_kernel
 namespace paddle {
diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h
index 003ec194366c9990273e212c295aa4fe70818441..33dcde4590068b0241fdd3567206dc72f15caa2b 100644
--- a/paddle/fluid/operators/math/detail/lstm_kernel.h
+++ b/paddle/fluid/operators/math/detail/lstm_kernel.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include <type_traits>
 #include "paddle/fluid/operators/math/detail/activation_functions.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/maxouting.h b/paddle/fluid/operators/math/maxouting.h
index 50bddf73bc10ccd35ee774eea065b005cdaa281b..ceeb85d6d36ef3fd299a2ffa84cf0de243a1e76a 100644
--- a/paddle/fluid/operators/math/maxouting.h
+++ b/paddle/fluid/operators/math/maxouting.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/macros.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h
index 4743f0dc9faf1d6d553116413efec8eb54b43e4d..f0637a40b8cde0d51a9f5cfbaf8046b988a22e8a 100644
--- a/paddle/fluid/operators/math/pooling.h
+++ b/paddle/fluid/operators/math/pooling.h
@@ -20,8 +20,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/macros.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/modified_huber_loss_op.cu b/paddle/fluid/operators/modified_huber_loss_op.cu
index 3c85da3c52c6c99209dc758d7983174acd99c9fc..ea08dc8084abf1ef54b3aa8fa19c15389d425025 100644
--- a/paddle/fluid/operators/modified_huber_loss_op.cu
+++ b/paddle/fluid/operators/modified_huber_loss_op.cu
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <thrust/for_each.h>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/modified_huber_loss_op.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/modified_huber_loss_op.h b/paddle/fluid/operators/modified_huber_loss_op.h
index 398676ba7415111ea05691e6bc7aa7618084fb14..4f552edf97bbe72976f43949b24dd5b58dbe07c1 100644
--- a/paddle/fluid/operators/modified_huber_loss_op.h
+++ b/paddle/fluid/operators/modified_huber_loss_op.h
@@ -16,7 +16,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/multinomial_op.h b/paddle/fluid/operators/multinomial_op.h
index 14cfbd268389ec8798d2e8438b91c8b250972221..df4c2e9e7bbf63590dd853f75202ead30654e1d0 100644
--- a/paddle/fluid/operators/multinomial_op.h
+++ b/paddle/fluid/operators/multinomial_op.h
@@ -18,7 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/nll_loss_op.cu b/paddle/fluid/operators/nll_loss_op.cu
index 03af45634149df94a2547b9df7653d81f9b6be13..e3c99afe820c2d5123716687d3bdbb36c6f4fdd4 100644
--- a/paddle/fluid/operators/nll_loss_op.cu
+++ b/paddle/fluid/operators/nll_loss_op.cu
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/nll_loss_op.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/roll_op.cu b/paddle/fluid/operators/roll_op.cu
index 57986d262820d02aba275b9e1a08dcfe49b62017..7e8e37bd2ee8fd332762a952ec8fd493de2ae3bb 100644
--- a/paddle/fluid/operators/roll_op.cu
+++ b/paddle/fluid/operators/roll_op.cu
@@ -13,11 +13,11 @@
 // limitations under the License.
 #pragma once
-#include "paddle/fluid/framework/array.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/roll_op.h"
 #include "paddle/fluid/platform/complex.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
+#include "paddle/pten/core/array.h"
 
 namespace paddle {
 namespace operators {
@@ -28,9 +28,9 @@ using LoDTensor = framework::LoDTensor;
 
 template <typename T, size_t Rank>
 __global__ void RollCudaKernel(const T* input, T* output, int64_t N,
-                               paddle::framework::Array<int64_t, Rank> shifts,
-                               paddle::framework::Array<int64_t, Rank> strides,
-                               paddle::framework::Array<int64_t, Rank> sizes) {
+                               pten::framework::Array<int64_t, Rank> shifts,
+                               pten::framework::Array<int64_t, Rank> strides,
+                               pten::framework::Array<int64_t, Rank> sizes) {
   int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
   if (idx >= N) {
     return;
@@ -101,9 +101,9 @@ class RollKernel
 
 #define CALL_ROLL_CUDA_KERNEL(N)                      \
   case N: {                                           \
-    paddle::framework::Array<int64_t, N> _strides;    \
-    paddle::framework::Array<int64_t, N> _shifts;     \
-    paddle::framework::Array<int64_t, N> _sizes;      \
+    pten::framework::Array<int64_t, N> _strides;      \
+    pten::framework::Array<int64_t, N> _shifts;       \
+    pten::framework::Array<int64_t, N> _sizes;        \
     for (size_t idx = 0; idx < N; ++idx) {            \
      _strides[idx] = strides[idx];                    \
      _shifts[idx] = shifts[idx];                      \
diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
index cc012230c1062968da2cd4f2833508795a27b079..de29822b8d7fe9c12b55ccd167dd996d190376fd 100644
--- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
+++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
@@ -22,7 +22,7 @@ namespace cub = hipcub;
 #include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h
index efe3afba18e8f3368f2e21f91adc5aa935bf713a..e30b48b1500ed7e5a8fc916879d03024981688f2 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.h
+++ b/paddle/fluid/operators/smooth_l1_loss_op.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/unstack_op.h b/paddle/fluid/operators/unstack_op.h
index cfd4d6bce83643982977e626fb809ff87384f264..413470e3db5d47fc11230e771ebe08f38c02565f 100644
--- a/paddle/fluid/operators/unstack_op.h
+++ b/paddle/fluid/operators/unstack_op.h
@@ -20,7 +20,6 @@ limitations under the License. */
 
 #if defined(__NVCC__) || defined(__HIPCC__)
 #include <thrust/device_vector.h>
-#include "paddle/fluid/framework/array.h"
 #endif
 
 namespace paddle {
diff --git a/paddle/fluid/platform/aligned_vector.h b/paddle/fluid/platform/aligned_vector.h
index 7d014f6bdcb0bdd00f302c58e84d5314b7552b50..144c017414a5dda6cfb28fb03b63efef4ffb1ca7 100644
--- a/paddle/fluid/platform/aligned_vector.h
+++ b/paddle/fluid/platform/aligned_vector.h
@@ -14,7 +14,7 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 namespace paddle {
 namespace platform {
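The `CALL_ROLL_CUDA_KERNEL` macro above is an instance of a common pattern: a runtime rank is switched onto a compile-time `N` so the kernel can keep its index arrays in a fixed-size `Array`. A simplified sketch of that dispatch (the `LaunchWithRank`/`Dispatch` helpers are hypothetical, not from this patch):

```cpp
#include <cstdint>
#include <vector>

#include "paddle/pten/core/array.h"

template <size_t N>
void LaunchWithRank(const std::vector<int64_t>& strides) {
  pten::framework::Array<int64_t, N> arr;  // fixed-size, passable by value to a kernel
  for (size_t i = 0; i < N; ++i) arr[i] = strides[i];
  // ... launch the kernel with arr ...
}

void Dispatch(const std::vector<int64_t>& strides) {
  switch (strides.size()) {
    case 1: LaunchWithRank<1>(strides); break;
    case 2: LaunchWithRank<2>(strides); break;
    case 3: LaunchWithRank<3>(strides); break;
    default: break;  // the real code covers every rank up to kMaxRank
  }
}
```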
diff --git a/paddle/fluid/platform/eigen_ext.h b/paddle/fluid/platform/eigen_ext.h
index 2b3d1693f6245e511e734b7015af9a2614e9d80f..872a6cf062eeff01e2725e8d8ea64058438be114 100644
--- a/paddle/fluid/platform/eigen_ext.h
+++ b/paddle/fluid/platform/eigen_ext.h
@@ -17,7 +17,7 @@
 #include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/fluid/platform/complex.h"
 #include "paddle/fluid/platform/float16.h"
-#include "paddle/fluid/platform/hostdevice.h"
+#include "paddle/pten/core/hostdevice.h"
 
 #include "unsupported/Eigen/CXX11/Tensor"
diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h
index cc9919d8366be3c3cbf01175a46d8ab6c5a5b23f..e3a391462878a3e1be0efc7109dec3c2543eba63 100644
--- a/paddle/fluid/platform/transform.h
+++ b/paddle/fluid/platform/transform.h
@@ -19,8 +19,8 @@ limitations under the License. */
 
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/pten/core/hostdevice.h"
 
 #if defined(__NVCC__) || defined(__HIPCC__)
 #include <thrust/execution_policy.h>
diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu
index 23f5865971246b2862f859885f5bfccd926b9697..32ec113d1f5e5334035db4550db7d9f828df3c9d 100644
--- a/paddle/fluid/platform/transform_test.cu
+++ b/paddle/fluid/platform/transform_test.cu
@@ -15,8 +15,8 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/memory/memory.h"
-#include "paddle/fluid/platform/hostdevice.h"
 #include "paddle/fluid/platform/transform.h"
+#include "paddle/pten/core/hostdevice.h"
 
 template <typename T>
 class Scale {
diff --git a/paddle/pten/api/include/tensor.h b/paddle/pten/api/include/tensor.h
index c26c9ce839458157737b7bb7f39f2ffc73081209..d2afd703eaf2a1827143fd6b6f47c6f42941c250 100644
--- a/paddle/pten/api/include/tensor.h
+++ b/paddle/pten/api/include/tensor.h
@@ -42,12 +42,12 @@ class DenseTensor;
 namespace pten {
 class TensorBase;
+namespace framework {
+class DDim;
+}  // namespace framework
 }  // namespace pten
 
 namespace paddle {
-namespace framework {
-class DDim;
-}
 
 namespace experimental {
@@ -159,9 +159,9 @@ class PADDLE_API Tensor final {
   /**
    * @brief Return the dimensions of Tensor.
    *
-   * @return paddle::framework::DDim
+   * @return pten::framework::DDim
    */
-  paddle::framework::DDim dims() const;
+  pten::framework::DDim dims() const;
 
   /**
    * @brief Return the shape (dimensions) of Tensor.
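The forward declaration moves with the type so the public API header stays lightweight. A sketch of what that enables for downstream headers (the `inspect` consumer is hypothetical):

```cpp
// API headers can mention DDim by reference without including the full
// definition, exactly as tensor.h does above.
namespace pten {
namespace framework {
class DDim;  // forward declaration only
}  // namespace framework
}  // namespace pten

void inspect(const pten::framework::DDim& dims);  // hypothetical consumer
```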
diff --git a/paddle/pten/api/lib/tensor.cc b/paddle/pten/api/lib/tensor.cc
index cb70d26f947b874247141f4a9bb547f3d73b63dc..0ccc9c56dbff729061ec759d8e9a627b75a853ef 100644
--- a/paddle/pten/api/lib/tensor.cc
+++ b/paddle/pten/api/lib/tensor.cc
@@ -47,13 +47,13 @@
  * In the future, the necessary components will be moved to the this library,
  * or the corresponding components will be re-implemented.
  */
-#include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/memory/memory.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/stream/cuda_stream.h"
 #include "paddle/pten/common/complex.h"
 #include "paddle/pten/common/float16.h"
+#include "paddle/pten/core/ddim.h"
 
 namespace paddle {
 namespace experimental {
@@ -94,10 +94,10 @@ int64_t Tensor::numel() const { return impl_->numel(); }
 
 int64_t Tensor::size() const { return impl_->numel(); }
 
-paddle::framework::DDim Tensor::dims() const { return impl_->dims(); }
+pten::framework::DDim Tensor::dims() const { return impl_->dims(); }
 
 std::vector<int64_t> Tensor::shape() const {
-  return paddle::framework::vectorize(impl_->dims());
+  return pten::framework::vectorize(impl_->dims());
 }
 
 void Tensor::reshape(const std::vector<int64_t> &shape) {
diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt
index facc9ac005662451a60451e440b73fab5057d14d..eabc5a19babad95cd2f5f88c46c4c59078d3e156 100644
--- a/paddle/pten/core/CMakeLists.txt
+++ b/paddle/pten/core/CMakeLists.txt
@@ -15,6 +15,15 @@ cc_library(tensor_meta SRCS tensor_meta.cc DEPS enforce mixed_vector)
 cc_library(dense_tensor SRCS dense_tensor.cc DEPS convert_utils tensor_meta tensor_base)
 cc_library(pten_device_context SRCS device_context.cc DEPS tensor_base )
 
+cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc)
+cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce)
+cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
+if(WITH_GPU)
+  nv_test(dim_test SRCS dim_test.cu DEPS ddim)
+elseif(WITH_ROCM)
+  hip_test(dim_test SRCS dim_test.cu DEPS ddim)
+endif()
+
 # Will remove once we implemented MKLDNN_Tensor
 if(WITH_MKLDNN)
   add_dependencies(dense_tensor mkldnn)
diff --git a/paddle/fluid/framework/array.h b/paddle/pten/core/array.h
similarity index 94%
rename from paddle/fluid/framework/array.h
rename to paddle/pten/core/array.h
index 0ec9cb81129c2125576accd200c8131621685fc8..86d222d2d57b3e639b9eff07b0ba905d30a7f2f9 100644
--- a/paddle/fluid/framework/array.h
+++ b/paddle/pten/core/array.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,10 +15,12 @@
 #pragma once
 
 #include <cstdint>
-#include "paddle/fluid/framework/unroll_array_ops.h"
+#include "paddle/pten/core/unroll_array_ops.h"
+// TODO(paddle-dev): Need to modify into pten/core/enforce.h
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
+namespace pten {
+namespace platform = paddle::platform;
 namespace framework {
 
 template <typename T, size_t N>
@@ -146,4 +148,4 @@ class Array {
 };
 
 }  // namespace framework
-}  // namespace paddle
+}  // namespace pten
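Worth noting as a design choice: while `enforce.h` still lives in fluid, the moved files keep their unqualified `platform::errors::...` spellings working through a namespace alias rather than a mass rename. A sketch of the mechanism (same line as added in `array.h` and `ddim.cc`):

```cpp
namespace pten {
// Unqualified platform::... inside pten now resolves to the fluid
// implementation until enforce.h is itself migrated.
namespace platform = paddle::platform;
}  // namespace pten
```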
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/framework/ddim.h" +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/pten/core/ddim.h" #include -#include "paddle/fluid/platform/enforce.h" -namespace paddle { +namespace pten { +namespace platform = paddle::platform; namespace framework { DDim make_ddim(std::initializer_list dims) { @@ -82,10 +82,13 @@ bool contain_unknown_dim(const DDim& ddim) { DDim slice_ddim(const DDim& dim, int begin, int end) { PADDLE_ENFORCE_EQ( - (begin >= 0 && end <= dim.size()), true, + (begin >= 0 && end <= dim.size()), + true, platform::errors::InvalidArgument( - "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin, - end, dim.size())); + "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", + begin, + end, + dim.size())); // Constructor of DDim would check whether end - begin is valid return DDim(dim.Get() + begin, end - begin); } @@ -108,27 +111,34 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { } DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims) { - PADDLE_ENFORCE_GE(src.size(), 3, + PADDLE_ENFORCE_GE(src.size(), + 3, platform::errors::InvalidArgument( "The rank of src dim should be at least 3 " "in flatten_to_3d, but received %d.", src.size())); - PADDLE_ENFORCE_EQ((num_row_dims >= 1 && num_row_dims < src.size()), true, + PADDLE_ENFORCE_EQ((num_row_dims >= 1 && num_row_dims < src.size()), + true, platform::errors::InvalidArgument( "The num_row_dims should be inside [1, %d] " "in flatten_to_3d, but received %d.", - src.size() - 1, num_row_dims)); - PADDLE_ENFORCE_EQ((num_col_dims >= 2 && num_col_dims <= src.size()), true, + src.size() - 1, + num_row_dims)); + PADDLE_ENFORCE_EQ((num_col_dims >= 2 && num_col_dims <= src.size()), + true, platform::errors::InvalidArgument( "The num_col_dims should be inside [2, %d] " "in flatten_to_3d, but received %d.", - src.size(), num_col_dims)); + src.size(), + num_col_dims)); PADDLE_ENFORCE_GE( - num_col_dims, num_row_dims, + num_col_dims, + num_row_dims, platform::errors::InvalidArgument( "The num_row_dims should be less than num_col_dims in flatten_to_3d," "but received num_row_dims = %d, num_col_dims = %d.", - num_row_dims, num_col_dims)); + num_row_dims, + num_col_dims)); return DDim({product(slice_ddim(src, 0, num_row_dims)), product(slice_ddim(src, num_row_dims, num_col_dims)), @@ -169,13 +179,16 @@ DDim DDim::reshape(const std::vector& shape) const { out_dims.rank_ = shape.size(); for (size_t i = 0; i < shape.size(); ++i) { if (shape[i] == copy_dim_val) { - PADDLE_ENFORCE_LT(static_cast(i), 
in_dims.size(), + PADDLE_ENFORCE_LT(static_cast(i), + in_dims.size(), platform::errors::InvalidArgument( "Index %d of shape under which the value of 0 " "is stored, must be lower than the number of " "old dimensions. But received shape[%d] = 0, " "dimensions = %d, shape = [%s].", - i, in_dims.size(), in_dims)); + i, + in_dims.size(), + in_dims)); out_dims[i] = in_dims[i]; } else { out_dims[i] = shape[i]; @@ -190,19 +203,23 @@ DDim DDim::transpose(const std::vector& axis) const { size_t axis_size = axis.size(); auto axis_set = std::set(axis.begin(), axis.end()); - PADDLE_ENFORCE_EQ(axis_set.size(), axis_size, + PADDLE_ENFORCE_EQ(axis_set.size(), + axis_size, platform::errors::InvalidArgument( "In an axis array, elements must be unique.")); PADDLE_ENFORCE_EQ( - in_rank, axis_size, + in_rank, + axis_size, platform::errors::InvalidArgument("The input dimension's size " "should be equal to the axis's size. " "But received dimension is %d, " "axis's size is %d", - in_rank, axis_size)); + in_rank, + axis_size)); - PADDLE_ENFORCE_LT(*std::max_element(axis.begin(), axis.end()), axis_size, + PADDLE_ENFORCE_LT(*std::max_element(axis.begin(), axis.end()), + axis_size, platform::errors::InvalidArgument( "Axis values must be ranging from 0 to (dims - 1).")); @@ -214,4 +231,4 @@ DDim DDim::transpose(const std::vector& axis) const { } } // namespace framework -} // namespace paddle +} // namespace pten \ No newline at end of file diff --git a/paddle/pten/core/ddim.h b/paddle/pten/core/ddim.h new file mode 100644 index 0000000000000000000000000000000000000000..148c32481c0086059b5e66910a094dd427e9bbda --- /dev/null +++ b/paddle/pten/core/ddim.h @@ -0,0 +1,257 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include +#include +#include + +#include "paddle/pten/core/dim.h" + +namespace pten { +namespace platform = paddle::platform; +namespace framework { + +#define PADDLE_VISIT_DDIM_BASE(rank, callback) \ + case (rank): { \ + constexpr auto kRank = (rank); \ + return (callback); \ + } + +#define PADDLE_VISIT_DDIM(rank, callback) \ + switch (rank) { \ + PADDLE_VISIT_DDIM_BASE(0, callback); \ + PADDLE_VISIT_DDIM_BASE(1, callback); \ + PADDLE_VISIT_DDIM_BASE(2, callback); \ + PADDLE_VISIT_DDIM_BASE(3, callback); \ + PADDLE_VISIT_DDIM_BASE(4, callback); \ + PADDLE_VISIT_DDIM_BASE(5, callback); \ + PADDLE_VISIT_DDIM_BASE(6, callback); \ + PADDLE_VISIT_DDIM_BASE(7, callback); \ + PADDLE_VISIT_DDIM_BASE(8, callback); \ + PADDLE_VISIT_DDIM_BASE(9, callback); \ + default: \ + PADDLE_THROW(platform::errors::Unimplemented( \ + "Invalid dimension to be accessed. Now only supports access to " \ + "dimension 0 to 9, but received dimension is %d.", \ + rank)); \ + } + +template +inline void dynamic_dim_assign(const T1* in, T2* out, int n) { + PADDLE_VISIT_DDIM(n, (static_dim_assign(in, out))); +} + +/** + * \brief A dynamically sized dimension. + * + * The number of dimensions must be between [1, 9]. 
+ */ +class DDim { + public: + constexpr static int kMaxRank = 9; + + DDim() : rank_(1) { dim_[0] = 0; } + + DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); } + + DDim(const int* d, int n) : rank_(n) { + dynamic_dim_assign(d, dim_.GetMutable(), n); + } + + DDim(const int64_t* d, int n) : rank_(n) { + dynamic_dim_assign(d, dim_.GetMutable(), n); + } + + template + /*implicit*/ DDim(const Dim& in) : rank_(D) { // NOLINT + UnsafeCast() = in; + } + + /*implicit*/ DDim(std::initializer_list init_list) + : DDim(init_list.begin(), init_list.size()) {} + + inline DDim& operator=(const DDim& ddim) { return CopyFrom(ddim); } + + template + inline DDim& operator=(const Dim& dim) { + rank_ = D; + UnsafeCast() = dim; + return *this; + } + + inline int64_t& operator[](int idx) { return dim_[idx]; } + + inline int64_t operator[](int idx) const { return dim_[idx]; } + + int64_t& at(int idx) { + PADDLE_ENFORCE_GE(idx, + 0, + platform::errors::InvalidArgument( + "Invalid DDim index to be accessed. The valid index " + "is between 0 and %d, but received index is %d.", + rank_, + idx)); + PADDLE_ENFORCE_LT(idx, + rank_, + platform::errors::InvalidArgument( + "Invalid DDim index to be accessed. The valid index " + "is between 0 and %d, but received index is %d.", + rank_, + idx)); + return dim_[idx]; + } + + int64_t at(int idx) const { + PADDLE_ENFORCE_GE(idx, + 0, + platform::errors::InvalidArgument( + "Invalid DDim index to be accessed. The valid index " + "is between 0 and %d, but received index is %d.", + rank_, + idx)); + PADDLE_ENFORCE_LT(idx, + rank_, + platform::errors::InvalidArgument( + "Invalid DDim index to be accessed. The valid index " + "is between 0 and %d, but received index is %d.", + rank_, + idx)); + return dim_[idx]; + } + + template + typename std::result_of&)>::type apply_visitor( + Visitor&& visitor) { + PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast())); + } + + template + typename std::result_of&)>::type apply_visitor( + Visitor&& visitor) const { + PADDLE_VISIT_DDIM(rank_, visitor(UnsafeCast())); + } + + bool operator==(const DDim& d) const; + + bool operator!=(const DDim& d) const; + + inline const int64_t* Get() const { return dim_.Get(); } + + inline int64_t* GetMutable() { return dim_.GetMutable(); } + + inline int size() const { return rank_; } + + std::string to_str() const; + + DDim reshape(const std::vector& shape) const; + + DDim transpose(const std::vector& axis) const; + + private: + template + inline Dim& UnsafeCast() { + static_assert(D >= 0 && D <= kMaxRank, "Invalid rank"); + auto* p = static_cast(&dim_); + return *reinterpret_cast*>(p); + } + + template + inline const Dim& UnsafeCast() const { + static_assert(D >= 0 && D <= kMaxRank, "Invalid rank"); + auto* p = static_cast(&dim_); + return *reinterpret_cast*>(p); + } + + inline DDim& CopyFrom(const DDim& ddim) { + PADDLE_VISIT_DDIM(ddim.rank_, (*this = ddim.UnsafeCast())); + } + + friend DDim stride(const DDim& ddim); + friend DDim stride_numel(const DDim& ddim); + + private: + Dim dim_; + int rank_; +}; + +#undef PADDLE_VISIT_DDIM_BASE +#undef PADDLE_VISIT_DDIM + +/** + * \brief Make a DDim from std::vector + * + * \param dims An vector of ints. Must be sized between [1, 9] + */ +DDim make_ddim(const std::vector& dims); + +DDim make_ddim(const std::vector& dims); + +/** + * \brief Make a DDim from an initializer list + * + * \param dims An initializer list of ints. 
Must be sized between [1, 9] + * + */ +DDim make_ddim(std::initializer_list<int64_t> dims); + +template <typename T = int64_t> +std::vector<T> vectorize(const DDim& ddim) { + std::vector<T> result(DDim::kMaxRank); + dynamic_dim_assign(ddim.Get(), result.data(), ddim.size()); + result.resize(ddim.size()); + return result; +} + +int64_t product(const DDim& ddim); + +bool contain_unknown_dim(const DDim& ddim); + +/** + * \brief Slice a ddim + * + * Slice dim with [begin, end). + * e.g. DDim d = make_ddim({1,2,3,4,5}); + * slice_ddim(d, 1, 3); ====> {2,3} + */ +DDim slice_ddim(const DDim& dim, int begin, int end); + +/** + * \brief What is the length of this dimension? + * + * \param Dynamic dimension to inspect + */ + +int arity(const DDim& ddim); + +std::ostream& operator<<(std::ostream&, const DDim&); + +/** +* \brief Flatten dim to 3d +* e.g., DDim d = make_ddim({1, 2, 3, 4, 5, 6}) +* flatten_to_3d(d, 2, 4); ===> {1*2, 3*4, 5*6} ===> {2, 12, 30} +*/ +DDim flatten_to_3d(const DDim& src, int num_row_dims, int num_col_dims); + +// Reshape a tensor to a matrix. The matrix's first dimension (column length) +// will be the product of tensor's first `num_col_dims` dimensions. +DDim flatten_to_2d(const DDim& src, int num_col_dims); + +DDim flatten_to_1d(const DDim& src); + +DDim stride(const DDim& ddim); + +DDim stride_numel(const DDim& ddim); +} // namespace framework +} // namespace pten
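That is the whole public surface of the relocated DDim: a fixed-capacity Dim<9> plus a runtime rank_, with PADDLE_VISIT_DDIM translating the runtime rank into a compile-time constant before any rank-dependent code runs. A rough illustration of how apply_visitor is meant to be used (ProductVisitor and numel are our illustrative names, not part of this diff):

// Illustrative only: a rank-generic functor dispatched by apply_visitor.
// PADDLE_VISIT_DDIM switches on rank_ and hands the visitor a statically
// sized Dim<D>, so the loop bound below is a compile-time constant in
// every instantiation.
struct ProductVisitor {
  template <int D>
  int64_t operator()(const pten::framework::Dim<D>& dim) const {
    int64_t prod = 1;
    for (int i = 0; i < D; ++i) prod *= dim[i];
    return prod;
  }
};

int64_t numel(const pten::framework::DDim& ddim) {
  return ddim.apply_visitor(ProductVisitor());
}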
diff --git a/paddle/pten/core/ddim_test.cc b/paddle/pten/core/ddim_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..1903bbfdff135ebd19d0cef258401f13fe11fa04 --- /dev/null +++ b/paddle/pten/core/ddim_test.cc @@ -0,0 +1,83 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <sstream> + +#include "gtest/gtest.h" +#include "paddle/pten/core/ddim.h" + +TEST(DDim, Equality) { + // construct a DDim from an initialization list + pten::framework::DDim ddim = pten::framework::make_ddim({9, 1, 5}); + EXPECT_EQ(ddim[0], 9); + EXPECT_EQ(ddim[1], 1); + EXPECT_EQ(ddim[2], 5); + + // construct a DDim from a vector + std::vector<int64_t> vec({9, 1, 5}); + pten::framework::DDim vddim = pten::framework::make_ddim(vec); + EXPECT_EQ(vddim[0], 9); + EXPECT_EQ(vddim[1], 1); + EXPECT_EQ(vddim[2], 5); + + // mutate a DDim + ddim[1] = 2; + EXPECT_EQ(ddim[1], 2); + ddim[0] = 6; + EXPECT_EQ(ddim[0], 6); + + // vectorize a DDim + std::vector<int64_t> res_vec = pten::framework::vectorize(vddim); + EXPECT_EQ(res_vec[0], 9); + EXPECT_EQ(res_vec[1], 1); + EXPECT_EQ(res_vec[2], 5); + pten::framework::Dim<3> d(3, 2, 1); + res_vec = pten::framework::vectorize(pten::framework::DDim(d)); + EXPECT_EQ(res_vec[0], 3); + EXPECT_EQ(res_vec[1], 2); + EXPECT_EQ(res_vec[2], 1); + + // arity of a DDim + EXPECT_EQ(pten::framework::arity(ddim), 3); + EXPECT_EQ(ddim.size(), 3); + + // product of a DDim + EXPECT_EQ(pten::framework::product(vddim), 45); + EXPECT_EQ(pten::framework::product(pten::framework::make_ddim({3, 2, 5, 3})), + 90); + + // slice a DDim + pten::framework::DDim ddim2 = pten::framework::make_ddim({1, 2, 3, 4, 5, 6}); + pten::framework::DDim ss = pten::framework::slice_ddim(ddim2, 2, 5); + EXPECT_EQ(arity(ss), 3); + EXPECT_EQ(ss[0], 3); + EXPECT_EQ(ss[1], 4); + EXPECT_EQ(ss[2], 5); + pten::framework::DDim ss2 = pten::framework::slice_ddim(ddim2, 0, 6); + EXPECT_EQ(arity(ss2), 6); + EXPECT_EQ(ss2[0], 1); + EXPECT_EQ(ss2[1], 2); + EXPECT_EQ(ss2[2], 3); + EXPECT_EQ(ss2[3], 4); + EXPECT_EQ(ss2[4], 5); + EXPECT_EQ(ss2[5], 6); +} + +TEST(DDim, Print) { + // print a DDim + std::stringstream ss; + pten::framework::DDim ddim = pten::framework::make_ddim({2, 3, 4}); + ss << ddim; + EXPECT_EQ("2, 3, 4", ss.str()); +} diff --git a/paddle/pten/core/dim.h b/paddle/pten/core/dim.h new file mode 100644 index 0000000000000000000000000000000000000000..8dd984891a894817b0b59725d3dac754eb8d99d1 --- /dev/null +++ b/paddle/pten/core/dim.h @@ -0,0 +1,100 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <iostream> +#include <sstream> +#include <stdexcept> +#include <string> +#include <type_traits> + +#include "paddle/pten/core/array.h" +#include "paddle/pten/core/hostdevice.h" + +namespace pten { +namespace framework { + +// Statically sized, statically indexed dimension +template <int D> +class Dim : public Array<int64_t, D> { + public: + static_assert(D >= 0, "D must be not less than 0"); + + static constexpr int kRank = D; + using BaseClass = Array<int64_t, D>; + + inline Dim(int64_t head, const Dim<D - 1>& tail) { + (*this)[0] = head; + new (this->GetMutable() + 1) Dim<D - 1>(tail); + } + + template <typename... Args> + HOSTDEVICE explicit Dim(int64_t head, Args... args) + : BaseClass(head, args...)
{} + + /** Construct a Dim with each dimension set to the given index */ + HOSTDEVICE explicit Dim(int64_t idx) { this->Fill(idx); } + + HOSTDEVICE Dim() = default; + + HOST std::string to_string() const; +}; + +// Product of a Dim +template <int D> +HOSTDEVICE inline int64_t product(const Dim<D>& a) { + return UnrollProduct<D>::Run(a.Get()); +} + +/** + * Helper function to create a Dim + * + * \param idxes The type of Dim constructed depends on the number of params + * + */ + +template <typename... Args> +HOSTDEVICE inline Dim<sizeof...(Args)> make_dim(Args... idxes) { + return Dim<sizeof...(Args)>(idxes...); +} + +// Allows us to output a Dim +template <int D> +inline std::ostream& operator<<(std::ostream& os, const Dim<D>& d) { + os << d[0]; + for (int i = 1; i < D; ++i) { + os << ", " << d[i]; + } + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) { + return os; +} + +template <int D> +HOST std::string Dim<D>::to_string() const { + std::stringstream stream; + stream << *this; + return stream.str(); +} + +template <int D, typename T1, typename T2> +inline void static_dim_assign(const T1* in, T2* out) { + UnrollAssign<D>::Run(in, out); +} + +} // namespace framework +} // namespace pten
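Both product and static_dim_assign above lean on the Unroll* helpers from unroll_array_ops.h (renamed later in this diff): each template instantiation peels off one element, so loops over a Dim<D> are fully unrolled at compile time and remain usable in device code. A simplified, self-contained sketch of that recursion pattern, under our own illustrative names (UnrollProductSketch is not the PR's code):

// Sketch of the compile-time recursion behind UnrollProduct: each
// instantiation consumes one element and recurses; the kDone = true
// specialization terminates, so Run() inlines to d[0] * ... * d[N - 1].
template <size_t kStart, size_t kEnd, bool kDone>
struct UnrollProductSketch {
  template <typename T>
  static T Run(const T* d) {
    return d[kStart] *
           UnrollProductSketch<kStart + 1, kEnd, kStart + 1 == kEnd>::Run(d);
  }
};

template <size_t kStart, size_t kEnd>
struct UnrollProductSketch<kStart, kEnd, true> {
  template <typename T>
  static T Run(const T*) {
    return 1;
  }
};

// Mirrors the UnrollProduct<N> alias used by product(const Dim<D>&) above.
template <size_t N>
using UnrollProductSketchAlias = UnrollProductSketch<0, N, N == 0>;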
diff --git a/paddle/fluid/framework/dim_test.cu b/paddle/pten/core/dim_test.cu similarity index 62% rename from paddle/fluid/framework/dim_test.cu rename to paddle/pten/core/dim_test.cu index b3c26b10c6ffb4a0b651f53e6e74d67e6f3f846d..0f8d71c5d3b4cfb33b59e82709ac8bac51d18e6f 100644 --- a/paddle/fluid/framework/dim_test.cu +++ b/paddle/pten/core/dim_test.cu @@ -1,42 +1,43 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + #include <thrust/device_vector.h> #include <sstream> #include "gtest/gtest.h" -#include "paddle/fluid/framework/dim.h" +#include "paddle/pten/core/dim.h" -__global__ void test(paddle::framework::Dim<2>* o) { - o[0] = paddle::framework::make_dim(5, 6); +__global__ void test(pten::framework::Dim<2>* o) { + o[0] = pten::framework::make_dim(5, 6); } __global__ void dyn_idx_gpu(int64_t* o) { - auto d = paddle::framework::make_dim(5, 6); + auto d = pten::framework::make_dim(5, 6); o[0] = d[1]; } TEST(Dim, Equality) { // construct a Dim on the CPU - auto a = paddle::framework::make_dim(3, 4); + auto a = pten::framework::make_dim(3, 4); EXPECT_EQ(a[0], 3); EXPECT_EQ(a[1], 4); // construct a Dim on the GPU - thrust::device_vector<paddle::framework::Dim<2>> t(2); + thrust::device_vector<pten::framework::Dim<2>> t(2); #ifdef PADDLE_WITH_HIP - hipLaunchKernelGGL(test, dim3(1), dim3(1), 0, 0, - thrust::raw_pointer_cast(t.data())); + hipLaunchKernelGGL( + test, dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(t.data())); #else test<<<1, 1>>>(thrust::raw_pointer_cast(t.data())); #endif @@ -45,10 +46,10 @@ TEST(Dim, Equality) { EXPECT_EQ(a[1], 6); // product - EXPECT_EQ(paddle::framework::product(a), 30); + EXPECT_EQ(pten::framework::product(a), 30); // mutate a Dim - auto b = paddle::framework::make_dim(7, 8); + auto b = pten::framework::make_dim(7, 8); b[1] = 10; EXPECT_EQ(b[0], 7); EXPECT_EQ(b[1], 10); @@ -61,8 +62,8 @@ TEST(Dim, Equality) { // dynamic access on GPU thrust::device_vector<int64_t> r(1); #ifdef PADDLE_WITH_HIP - hipLaunchKernelGGL(dyn_idx_gpu, dim3(1), dim3(1), 0, 0, - thrust::raw_pointer_cast(r.data())); + hipLaunchKernelGGL( + dyn_idx_gpu, dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(r.data())); #else dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data())); #endif @@ -71,9 +72,9 @@ } TEST(Dim, Bool) { - auto a = paddle::framework::make_dim(3, 4); - auto b = paddle::framework::make_dim(5, 6); - auto c = paddle::framework::make_dim(3, 4); + auto a = pten::framework::make_dim(3, 4); + auto b = pten::framework::make_dim(5, 6); + auto c = pten::framework::make_dim(3, 4); // comparison EXPECT_TRUE(a == a); @@ -84,13 +85,13 @@ TEST(Dim, Print) { { std::stringstream ss; - auto a = paddle::framework::make_dim(2, 3); + auto a = pten::framework::make_dim(2, 3); ss << a; EXPECT_EQ(ss.str(), "2, 3"); } { std::stringstream ss; - ss << paddle::framework::make_dim(8); + ss << pten::framework::make_dim(8); EXPECT_EQ(ss.str(), "8"); } -} +} \ No newline at end of file diff --git a/paddle/fluid/platform/hostdevice.h b/paddle/pten/core/hostdevice.h similarity index 89% rename from paddle/fluid/platform/hostdevice.h rename to paddle/pten/core/hostdevice.h index 65005a5adbb1d300d61a10008ecd86e1a4c7eb7b..08fe3125287d76654173324e42a2d0773aab444c 100644 --- a/paddle/fluid/platform/hostdevice.h +++ b/paddle/pten/core/hostdevice.h @@ -1,16 +1,17 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. + #pragma once #ifdef __HIPCC__ diff --git a/paddle/pten/core/tensor_base.h b/paddle/pten/core/tensor_base.h index 528a52cee8da4287c13157dbcc2143895268e00b..662553cbcb5986daae13c11cb43b2ecf36bc12c2 100644 --- a/paddle/pten/core/tensor_base.h +++ b/paddle/pten/core/tensor_base.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/platform/place.h" #include "paddle/pten/common/backend.h" #include "paddle/pten/common/data_type.h" #include "paddle/pten/common/layout.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/storage.h" #include "paddle/pten/core/utils/type_registry.h" @@ -28,7 +28,7 @@ class TensorBase { public: using DataType = paddle::experimental::DataType; using DataLayout = paddle::experimental::DataLayout; - using DDim = paddle::framework::DDim; + using DDim = pten::framework::DDim; using Place = paddle::platform::Place; virtual ~TensorBase() = default; diff --git a/paddle/pten/core/tensor_meta.h b/paddle/pten/core/tensor_meta.h index 2df6b48b674a71fe572f7b36db4dcfa80cbd8274..ac3f17267c4f9425873592570ecc4aa901839158 100644 --- a/paddle/pten/core/tensor_meta.h +++ b/paddle/pten/core/tensor_meta.h @@ -21,7 +21,7 @@ limitations under the License. */ #include "paddle/pten/common/layout.h" // See Note [ Why still include the fluid headers? ] -#include "paddle/fluid/framework/ddim.h" +#include "paddle/pten/core/ddim.h" // Note: mixed_vector include many header now, LoD will be // used on CUDA device? Can we use small_vector here? @@ -30,7 +30,7 @@ limitations under the License. */ namespace pten { -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; using LoD = std::vector>; /// \brief The meta data of dense tensor. Take the structure type /// and use all default operations. diff --git a/paddle/fluid/framework/unroll_array_ops.h b/paddle/pten/core/unroll_array_ops.h similarity index 96% rename from paddle/fluid/framework/unroll_array_ops.h rename to paddle/pten/core/unroll_array_ops.h index a9c047cc6c6acfa5df157389fcd6055957c7bf4c..fb0358375a58ec585a33a7b830672a5bf18cc694 100644 --- a/paddle/fluid/framework/unroll_array_ops.h +++ b/paddle/pten/core/unroll_array_ops.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #include #include -#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/pten/core/hostdevice.h" -namespace paddle { +namespace pten { namespace framework { namespace detail { @@ -130,4 +130,4 @@ template using UnrollProduct = detail::UnrollProduct<0, N, N == 0>; } // namespace framework -} // namespace paddle +} // namespace pten diff --git a/paddle/fluid/framework/unroll_array_ops_test.cc b/paddle/pten/core/unroll_array_ops_test.cc similarity index 92% rename from paddle/fluid/framework/unroll_array_ops_test.cc rename to paddle/pten/core/unroll_array_ops_test.cc index c4fdfdb425f23eb0e1aef3f2414b26afd421bac4..f32d94be759beabe1c0fcf1f145601c074c3b62f 100644 --- a/paddle/fluid/framework/unroll_array_ops_test.cc +++ b/paddle/pten/core/unroll_array_ops_test.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/unroll_array_ops.h" +#include "paddle/pten/core/unroll_array_ops.h" #include #include -namespace paddle { +namespace pten { namespace framework { template @@ -79,4 +79,4 @@ TEST(unroll_ops, product) { } } // namespace framework -} // namespace paddle +} // namespace pten \ No newline at end of file diff --git a/paddle/pten/infermeta/binary.cc b/paddle/pten/infermeta/binary.cc index ea587806bfcb2fa49a20a6b6bb46c131c533ba9a..083fb0fca21881bcfbf078d31fb23687d07864f2 100644 --- a/paddle/pten/infermeta/binary.cc +++ b/paddle/pten/infermeta/binary.cc @@ -64,8 +64,8 @@ DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, const DenseTensorMeta& y_meta, bool trans_x, bool trans_y) { - std::vector dims_x = paddle::framework::vectorize(x_meta.dims); - std::vector dims_y = paddle::framework::vectorize(y_meta.dims); + std::vector dims_x = pten::framework::vectorize(x_meta.dims); + std::vector dims_y = pten::framework::vectorize(y_meta.dims); auto ndims_x = dims_x.size(); auto ndims_y = dims_y.size(); PADDLE_ENFORCE_GT(ndims_x, @@ -125,7 +125,7 @@ DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, new_dims.push_back(1); } - auto ddim_out = paddle::framework::make_ddim(new_dims); + auto ddim_out = pten::framework::make_ddim(new_dims); return {x_meta.dtype, ddim_out, x_meta.layout}; } @@ -169,7 +169,7 @@ DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, out_dims_array.data(), max_dim, axis); - return_meta.dims = paddle::framework::make_ddim(out_dims_array); + return_meta.dims = pten::framework::make_ddim(out_dims_array); } return_meta.lod = x_meta.lod; return return_meta; diff --git a/paddle/pten/infermeta/nullary.cc b/paddle/pten/infermeta/nullary.cc index 731e69e60907b322e01abd93708734877976429b..19e11f049fee78bab3a30b678bf7ecd67cff4847 100644 --- a/paddle/pten/infermeta/nullary.cc +++ b/paddle/pten/infermeta/nullary.cc @@ -20,14 +20,14 @@ namespace pten { DenseTensorMeta CreateInferMeta(const std::vector& shape, DataType dtype, DataLayout layout) { - const auto& out_dims = paddle::framework::make_ddim(shape); + const auto& out_dims = pten::framework::make_ddim(shape); return {dtype, out_dims, layout}; } DenseTensorMeta CreateInferMeta(const ScalarArray& shape, DataType dtype, DataLayout layout) { - const auto& out_dims = paddle::framework::make_ddim(shape.GetData()); + const auto& out_dims = pten::framework::make_ddim(shape.GetData()); return {dtype, out_dims, layout}; } diff --git a/paddle/pten/infermeta/unary.cc b/paddle/pten/infermeta/unary.cc index 843a78f3413cf0572989cff7c4cfd2d2cead362b..27e1dc9511df231ba3c81f9a1ece7dbaafdb2450 100644 --- a/paddle/pten/infermeta/unary.cc +++ b/paddle/pten/infermeta/unary.cc @@ -23,7 +23,7 @@ DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta) { } DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta) { - const auto& out_dims = paddle::framework::make_ddim({1}); + const auto& out_dims = pten::framework::make_ddim({1}); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); return return_meta; } @@ -63,7 +63,7 @@ DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, for (int i = stop_axis + 1; i < in_dims_size; i++) { out_shape.push_back(x_dims[i]); } - const auto& 
out_dims = paddle::framework::make_ddim(out_shape); + const auto& out_dims = pten::framework::make_ddim(out_shape); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); if (x_dims[0] == return_meta.dims[0]) { @@ -89,10 +89,10 @@ DenseTensorMeta CreateLikeInferMeta(const DenseTensorMeta& x_meta, layout == DataLayout::UNDEFINED ? x_meta.layout : layout}; } -static paddle::framework::DDim ValidateShape( - const std::vector shape, const paddle::framework::DDim& in_dims) { - const int64_t in_size = paddle::framework::product(in_dims); - auto in_dims_vec = paddle::framework::vectorize(in_dims); +static pten::framework::DDim ValidateShape( + const std::vector shape, const pten::framework::DDim& in_dims) { + const int64_t in_size = pten::framework::product(in_dims); + auto in_dims_vec = pten::framework::vectorize(in_dims); bool all_positive = std::all_of(in_dims_vec.cbegin(), in_dims_vec.cend(), [](int64_t i) { return i > 0; }); @@ -112,7 +112,7 @@ static paddle::framework::DDim ValidateShape( paddle::platform::errors::InvalidArgument( "Only one dimension value of 'shape' in ReshapeOp can " "be -1. But received shape = [%s], shape[%d] is also -1.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i)); unk_dim_idx = i; } else if (shape[i] == copy_dim_val) { @@ -124,7 +124,7 @@ static paddle::framework::DDim ValidateShape( "the input tensor X's dimensions. " "But received shape = [%s], shape[%d] = 0, X's shape = [%s], " "X's dimensions = %d.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i, in_dims, in_dims.size())); @@ -136,7 +136,7 @@ static paddle::framework::DDim ValidateShape( "Each dimension value of 'shape' in ReshapeOp must not " "be negative except one unknown dimension. " "But received shape = [%s], shape[%d] = %d.", - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), i, shape[i])); } @@ -165,7 +165,7 @@ static paddle::framework::DDim ValidateShape( "'shape' is [%s], known capacity of 'shape' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } else { output_shape[unk_dim_idx] = -1; @@ -183,7 +183,7 @@ static paddle::framework::DDim ValidateShape( "[%s], the capacity of 'shape' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } } @@ -202,11 +202,11 @@ static paddle::framework::DDim ValidateShape( "capacity of 'Out' is %d.", in_dims, in_size, - paddle::framework::make_ddim(shape), + pten::framework::make_ddim(shape), capacity)); } - return paddle::framework::make_ddim(output_shape); + return pten::framework::make_ddim(output_shape); } DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, @@ -267,7 +267,7 @@ DenseTensorMeta ReduceInferMeta(const DenseTensorMeta& x_meta, out_dim_vector.push_back(1); } } - DDim out_dim = paddle::framework::make_ddim(out_dim_vector); + DDim out_dim = pten::framework::make_ddim(out_dim_vector); DataType out_dtype; if (dtype != DataType::UNDEFINED) { diff --git a/paddle/pten/kernels/cpu/elementwise.h b/paddle/pten/kernels/cpu/elementwise.h index f048678111cf2dc4aa4160e30db66a2a185b01b1..e4f426d3f8eb4895ccaf209fd7626e2f083261c5 100644 --- a/paddle/pten/kernels/cpu/elementwise.h +++ b/paddle/pten/kernels/cpu/elementwise.h @@ -583,8 +583,8 @@ void CommonElementwiseBroadcastBackward(const CPUContext& ctx, } VLOG(3) << "CommonElementwiseBroadcastBackward xdims:" - << paddle::framework::make_ddim(x_dims_array) - << " ydim:" << 
paddle::framework::make_ddim(y_dims_array); + << pten::framework::make_ddim(x_dims_array) + << " ydim:" << pten::framework::make_ddim(y_dims_array); CommonGradBroadcastCPU(x, y, diff --git a/paddle/pten/kernels/cpu/reduce.h b/paddle/pten/kernels/cpu/reduce.h index b38f17aa02a5563a74a94511681d7d703f127ed0..86443c254bf67388d2613fa1078266edb81319a0 100644 --- a/paddle/pten/kernels/cpu/reduce.h +++ b/paddle/pten/kernels/cpu/reduce.h @@ -50,13 +50,13 @@ void ReduceFunctor(const DeviceContext& context, DDim out_dims = output->dims(); if (keep_dim && x_rank > 1) { const int kDelFlag = -2; - auto dims_vector = paddle::framework::vectorize(out_dims); + auto dims_vector = pten::framework::vectorize(out_dims); for (size_t i = 0; i < dims_ref.size(); ++i) { dims_vector[dims_ref[i]] = kDelFlag; } dims_vector.erase(remove(dims_vector.begin(), dims_vector.end(), kDelFlag), dims_vector.end()); - out_dims = paddle::framework::make_ddim(dims_vector); + out_dims = pten::framework::make_ddim(dims_vector); } auto& place = *context.eigen_device(); Functor functor; diff --git a/paddle/pten/kernels/empty_kernel.cc b/paddle/pten/kernels/empty_kernel.cc index d6a155dca0176d36b74fffcbac61c8d4beb73459..2deac0146c52c267e65c9b9587cae1050874b66c 100644 --- a/paddle/pten/kernels/empty_kernel.cc +++ b/paddle/pten/kernels/empty_kernel.cc @@ -24,7 +24,7 @@ template void EmptyKernel(const Context& dev_ctx, const ScalarArray& shape, DenseTensor* out) { - out->ResizeAndAllocate(paddle::framework::make_ddim(shape.GetData())); + out->ResizeAndAllocate(pten::framework::make_ddim(shape.GetData())); } template diff --git a/paddle/pten/kernels/flatten_grad_kernel.cc b/paddle/pten/kernels/flatten_grad_kernel.cc index e45ac516e16ed32601843e9f69d62bcb98752b26..cbbf62f1993e2d2ce3999e189685452d0a856e11 100644 --- a/paddle/pten/kernels/flatten_grad_kernel.cc +++ b/paddle/pten/kernels/flatten_grad_kernel.cc @@ -25,8 +25,7 @@ void FlattenGradKernel(const Context& dev_ctx, const DenseTensor& xshape, DenseTensor* x_grad) { auto xshape_dims = xshape.dims(); - auto x_dims = - paddle::framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + auto x_dims = pten::framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); pten::Copy(dev_ctx, out_grad, false, x_grad); x_grad->ResizeAndAllocate(x_dims); } diff --git a/paddle/pten/kernels/funcs/common_shape.h b/paddle/pten/kernels/funcs/common_shape.h index 8693fd2b36c4e7bd7d3c4ac2d17c10ca41106005..6bb45ad199510d3338fcfa99e1d39f3b60a9f042 100644 --- a/paddle/pten/kernels/funcs/common_shape.h +++ b/paddle/pten/kernels/funcs/common_shape.h @@ -26,7 +26,7 @@ inline void SetXShape(const DenseTensor &x, DenseTensor *xshape) { for (int i = 0; i < in_dims.size(); ++i) { xshape_dims[i + 1] = in_dims[i]; } - xshape->ResizeAndAllocate(paddle::framework::make_ddim(xshape_dims)); + xshape->ResizeAndAllocate(pten::framework::make_ddim(xshape_dims)); xshape->ResetLoD(x.meta().lod); } diff --git a/paddle/pten/kernels/funcs/elementwise_base.h b/paddle/pten/kernels/funcs/elementwise_base.h index 7396c64de9eab21f7743e1de28831099694352d3..47924c4e2ae189d93bda139fc4d325d8ff7f9529 100644 --- a/paddle/pten/kernels/funcs/elementwise_base.h +++ b/paddle/pten/kernels/funcs/elementwise_base.h @@ -36,10 +36,10 @@ enum ElementwiseType { kUnary = 1, kBinary = 2, kTernary = 3, kAny = -1 }; for supporting multiple-output feature in elementwise system.*/ template using ConditionalT = - typename std::conditional_t>; + typename std::conditional_t>; namespace funcs { -using DDim = paddle::framework::DDim; +using DDim = 
pten::framework::DDim; template struct ElemwiseGradNoBroadcast { @@ -303,9 +303,9 @@ inline DDim trim_trailing_singular_dims(const DDim &dims) { trim_dims[i] = dims[i]; } if (trim_dims.size() == 0) { - return DDim(paddle::framework::make_dim()); + return DDim(pten::framework::make_dim()); } - DDim actual_dims = paddle::framework::make_ddim(trim_dims); + DDim actual_dims = pten::framework::make_ddim(trim_dims); return actual_dims; } @@ -377,7 +377,7 @@ void ElemwiseGradComputeNoBroadcast(const DeviceContext &dev_ctx, DenseTensor *dy, DX_OP dx_op, DY_OP dy_op) { - size_t N = static_cast(paddle::framework::product(x_dim)); + size_t N = static_cast(pten::framework::product(x_dim)); paddle::platform::ForRange for_range(dev_ctx, N); for_range(ElemwiseGradNoBroadcast{ x.data(), @@ -462,7 +462,7 @@ struct ElementwisePrimitiveCaller { template struct ElementwiseWriteDataCaller { __device__ __forceinline__ void operator()( - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, ConditionalT src[VecSize], int block_offset, int num) { @@ -485,7 +485,7 @@ struct ElementwiseWriteDataCaller { template struct ElementwiseWriteDataCaller { __device__ __forceinline__ void operator()( - paddle::framework::Array<_ptr_ OutT *, 1> outs, + pten::framework::Array<_ptr_ OutT *, 1> outs, OutT src[VecSize], int block_offset, int num) { @@ -502,8 +502,8 @@ template __device__ void VectorizedElementwiseKernelImpl( - const paddle::framework::Array &in, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + const pten::framework::Array &in, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, int num, int data_offset, Functor func) { @@ -537,8 +537,8 @@ template __global__ void VectorizedElementwiseKernel( - paddle::framework::Array ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array ins, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, int size, int main_offset, Functor func) { @@ -578,8 +578,8 @@ void ElementwiseCudaKernel(const KPDevice &ctx, std::vector *outs, Functor func) { auto numel = ins[0]->numel(); - paddle::framework::Array ins_data; - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs_data; + pten::framework::Array ins_data; + pten::framework::Array<_ptr_ OutT *, NumOuts> outs_data; for (int i = 0; i < Arity; ++i) { ins_data[i] = ins[i]->data(); diff --git a/paddle/pten/kernels/funcs/elementwise_functor.h b/paddle/pten/kernels/funcs/elementwise_functor.h index 6b89902456ac8d511165aba50fe1e97c9394ee43..6d139d68530befe57bc0094eb3d5537cf00e660b 100644 --- a/paddle/pten/kernels/funcs/elementwise_functor.h +++ b/paddle/pten/kernels/funcs/elementwise_functor.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/hostdevice.h" #include "paddle/pten/common/float16.h" +#include "paddle/pten/core/hostdevice.h" namespace pten { namespace funcs { diff --git a/paddle/pten/kernels/funcs/transpose.cc b/paddle/pten/kernels/funcs/transpose.cc index 77d26fcbc3536db1b4aa47ce5dab9779991e9afd..90a6859a850910d0daebf586e6eede0febf12fe1 100644 --- a/paddle/pten/kernels/funcs/transpose.cc +++ b/paddle/pten/kernels/funcs/transpose.cc @@ -13,8 +13,8 @@ // limitations under the License. #include "paddle/pten/kernels/funcs/transpose.h" -#include "paddle/fluid/framework/ddim.h" #include "paddle/pten/backends/cpu/cpu_context.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" // See Note [ Why still include the fluid headers? 
] @@ -33,8 +33,8 @@ struct TransposeNormal { pten::DenseTensor* out, const std::vector& axis) { const int rank = axis.size(); - auto in_stride = paddle::framework::stride(in.dims()); - auto out_stride = paddle::framework::stride(out->dims()); + auto in_stride = pten::framework::stride(in.dims()); + auto out_stride = pten::framework::stride(out->dims()); const T* in_ptr = in.data(); T* out_ptr = out->mutable_data(); diff --git a/paddle/pten/kernels/funcs/transpose.cu b/paddle/pten/kernels/funcs/transpose.cu index 045bfdbdb051c9d5897ee60483c9200d5ead3e7e..474a7c4ea4de9254ae3b028cc925a5154d8d8787 100644 --- a/paddle/pten/kernels/funcs/transpose.cu +++ b/paddle/pten/kernels/funcs/transpose.cu @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/pten/backends/gpu/gpu_context.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/kernels/funcs/transpose.h" @@ -58,8 +58,8 @@ struct TransposeNormal { pten::DenseTensor* out, const std::vector& axis) { const int rank = axis.size(); - auto in_stride = paddle::framework::stride(in.dims()); - auto out_stride = paddle::framework::stride(out->dims()); + auto in_stride = pten::framework::stride(in.dims()); + auto out_stride = pten::framework::stride(out->dims()); auto* in_ptr = in.data(); auto* out_ptr = out->mutable_data(); diff --git a/paddle/pten/kernels/funcs/transpose.h b/paddle/pten/kernels/funcs/transpose.h index d0e4dafe2c3b8de56b0feaecd7ddd6e442d9c1a5..0cb2b4289fe6ef9f383eb20d241fb71430fb3634 100644 --- a/paddle/pten/kernels/funcs/transpose.h +++ b/paddle/pten/kernels/funcs/transpose.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/fluid/framework/ddim.h" +#include "paddle/pten/core/ddim.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/fluid/operators/eigen/eigen_function.h" diff --git a/paddle/pten/kernels/gpu/elementwise.h b/paddle/pten/kernels/gpu/elementwise.h index c3ff91e7b15cd6d62fa8a49e01c0e168d4669cba..def54e24840e7973f94c1bfce447327523984a55 100644 --- a/paddle/pten/kernels/gpu/elementwise.h +++ b/paddle/pten/kernels/gpu/elementwise.h @@ -130,14 +130,14 @@ struct DimensionsTransform { public: explicit DimensionsTransform(const std::vector &ins, - const paddle::framework::DDim &dims, + const pten::framework::DDim &dims, int axis) { const int N = ins.size(); dim_size = dims.size(); - out_dims = paddle::framework::vectorize(dims); + out_dims = pten::framework::vectorize(dims); in_dims.resize(N); for (int j = 0; j < N; ++j) { - in_dims[j] = paddle::framework::vectorize(ins[j]->dims()); + in_dims[j] = pten::framework::vectorize(ins[j]->dims()); } InputDimensionsExtend(N, axis); @@ -214,11 +214,11 @@ template __device__ void ElementwiseBroadcastKernelImpl( - const paddle::framework::Array &ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, - const paddle::framework::Array &use_broadcast, + const pten::framework::Array &ins, + pten::framework::Array<_ptr_ OutT *, NumOuts> outs, + const pten::framework::Array &use_broadcast, uint32_t numel, - const paddle::framework::Array, Arity> + const pten::framework::Array, Arity> &configs, int num, int block_offset, @@ -259,12 +259,11 @@ template __global__ void ElementwiseBroadcastKernel( - paddle::framework::Array ins, - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs, - paddle::framework::Array use_broadcast, + pten::framework::Array ins, + 
pten::framework::Array<_ptr_ OutT *, NumOuts> outs, + pten::framework::Array<bool, Arity> use_broadcast, uint32_t numel, - paddle::framework::Array<kps::details::BroadcastConfig<Rank>, Arity> - configs, + pten::framework::Array<kps::details::BroadcastConfig<Rank>, Arity> configs, int main_offset, int tail_tid, Functor func) { @@ -345,10 +344,10 @@ void LaunchKernel(const KPDevice &ctx, Functor func, DimensionsTransform merge_dims) { int numel = (*outs)[0]->numel(); - paddle::framework::Array<kps::details::BroadcastConfig<Rank>, Arity> configs; - paddle::framework::Array<bool, Arity> use_broadcast; - paddle::framework::Array<const _ptr_ InT *__restrict__, Arity> ins_data; - paddle::framework::Array<_ptr_ OutT *, NumOuts> outs_data; + pten::framework::Array<kps::details::BroadcastConfig<Rank>, Arity> configs; + pten::framework::Array<bool, Arity> use_broadcast; + pten::framework::Array<const _ptr_ InT *__restrict__, Arity> ins_data; + pten::framework::Array<_ptr_ OutT *, NumOuts> outs_data; for (int i = 0; i < NumOuts; ++i) { outs_data[i] = (*outs)[i]->mutable_data<OutT>(); @@ -444,7 +443,7 @@ void LaunchBroadcastKernelForDifferentVecSize( "The maximum dimension of input tensor is expected to be less than " "%d, but received %d.\n", merge_dims.dim_size, - paddle::framework::DDim::kMaxRank)); + pten::framework::DDim::kMaxRank)); } } #undef CALL_BROADCAST_FOR_DIM_SIZE @@ -1826,8 +1825,8 @@ void CommonElementwiseBroadcastBackward(const GPUContext &ctx, } VLOG(3) << "CommonElementwiseBroadcastBackward xdims:" - << paddle::framework::make_ddim(x_dims_array) - << " ydim:" << paddle::framework::make_ddim(y_dims_array); + << pten::framework::make_ddim(x_dims_array) + << " ydim:" << pten::framework::make_ddim(y_dims_array); CommonGradBroadcastCUDA(x, y, diff --git a/paddle/pten/kernels/gpu/reduce.h b/paddle/pten/kernels/gpu/reduce.h index e7d1d2d5f44fc142a290b496235634fbc19274c1..e247f786cc68d84fd6434695a9bf85ea6fabcad0 100644 --- a/paddle/pten/kernels/gpu/reduce.h +++ b/paddle/pten/kernels/gpu/reduce.h @@ -32,7 +32,6 @@ namespace cub = hipcub; #endif -#include "paddle/fluid/framework/array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" @@ -41,6 +40,7 @@ namespace cub = hipcub; #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/fast_divmod.h" #include "paddle/fluid/string/string_helper.h" +#include "paddle/pten/core/array.h" #include "paddle/pten/api/ext/dispatch.h" #include "paddle/pten/backends/gpu/gpu_context.h" @@ -118,7 +118,7 @@ static inline void CheckReduceRank(int reduce_rank, int rank) { // convert dims from vector to array template <typename Tx, int ElementCount, typename VectorLikeType> -static inline paddle::framework::Array<Tx, ElementCount> VectorToArray( +static inline pten::framework::Array<Tx, ElementCount> VectorToArray( const VectorLikeType& vec) { PADDLE_ENFORCE_LE(vec.size(), ElementCount, @@ -128,7 +128,7 @@ static inline paddle::framework::Array<Tx, ElementCount> VectorToArray( vec.size(), ElementCount)); size_t n = static_cast<size_t>(vec.size()); - paddle::framework::Array<Tx, ElementCount> ret; + pten::framework::Array<Tx, ElementCount> ret; for (size_t i = 0; i < n; ++i) { ret[i] = vec[i]; } @@ -162,7 +162,7 @@ static inline std::vector<int> GetReduceDim(const std::vector<int>& dims, } // namespace details -constexpr int kMaxRank = paddle::framework::DDim::kMaxRank; +constexpr int kMaxRank = pten::framework::DDim::kMaxRank; enum ReduceType { kReduceLastDim = 0x01, // when reduce_dim[0] == x_dim.size() - 1; @@ -202,9 +202,9 @@ struct IndexCalculator { } int dim; - paddle::framework::Array<int, kMaxRank> dims; - paddle::framework::Array<int, kMaxRank> strides; - paddle::framework::Array<paddle::platform::FastDivMod, kMaxRank> divmoders; + pten::framework::Array<int, kMaxRank> dims; + pten::framework::Array<int, kMaxRank> strides; + pten::framework::Array<paddle::platform::FastDivMod, kMaxRank> divmoders; }; template @@ -326,7 +326,7 @@ struct ReduceConfig { const
paddle::platform::Place& place, pten::DenseTensor* tmp) { if (should_reduce_again) { - tmp->ResizeAndAllocate(paddle::framework::make_ddim( + tmp->ResizeAndAllocate(pten::framework::make_ddim( {static_cast(left_num * grid.z * grid.y * sizeof(Ty))})); output_data = tmp->mutable_data(); } else { @@ -1029,7 +1029,7 @@ static pten::DenseTensor tmp = pten::DenseTensor( pten::make_intrusive(place), pten::DenseTensorMeta(pten::DataType::UINT8, - paddle::framework::make_ddim( + pten::framework::make_ddim( {static_cast(temp_storage_bytes)}))); auto* temp_storage = tmp.mutable_data(); @@ -1073,7 +1073,7 @@ void TensorReduceFunctorImpl(const pten::DenseTensor& x, // Allocate memory y->mutable_data(); - auto x_dim = paddle::framework::vectorize(x.dims()); + auto x_dim = pten::framework::vectorize(x.dims()); auto config = ReduceConfig(origin_reduce_dims, x_dim); config.Run(); int numel = x.numel(); diff --git a/paddle/pten/kernels/impl/dot_grad_kernel_impl.h b/paddle/pten/kernels/impl/dot_grad_kernel_impl.h index 39cdbad5146deb3235a78d823fbbcf51369ead18..557f6fae7b7f98efd17e0447d7c9c13498e420bf 100644 --- a/paddle/pten/kernels/impl/dot_grad_kernel_impl.h +++ b/paddle/pten/kernels/impl/dot_grad_kernel_impl.h @@ -103,7 +103,7 @@ struct DotGradFunctionmutable_data(); const auto* data_y = tensor_y->data(); const DDim& dim = tensor_x->dims(); - size_t N = static_cast(paddle::framework::product(dim)); + size_t N = static_cast(pten::framework::product(dim)); auto step = dim[dim.size() - 1]; @@ -118,7 +118,7 @@ struct DotGradFunctionmutable_data(); const auto* data_x = tensor_x->data(); const DDim& dim = tensor_y->dims(); - size_t N = static_cast(paddle::framework::product(dim)); + size_t N = static_cast(pten::framework::product(dim)); auto step = dim[dim.size() - 1]; diff --git a/paddle/pten/kernels/impl/full_kernel_impl.h b/paddle/pten/kernels/impl/full_kernel_impl.h index 134a815799de6004d6e84bc68db8800fe278d2a0..2900e2e83bd659b8fe5b6fbd1af12ec01524d52e 100644 --- a/paddle/pten/kernels/impl/full_kernel_impl.h +++ b/paddle/pten/kernels/impl/full_kernel_impl.h @@ -36,7 +36,7 @@ void FullKernel(const Context& dev_ctx, const ScalarArray& shape, const Scalar& val, DenseTensor* out) { - out->ResizeAndAllocate(paddle::framework::make_ddim(shape.GetData())); + out->ResizeAndAllocate(pten::framework::make_ddim(shape.GetData())); FullValue(dev_ctx, out, val.to()); } diff --git a/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h b/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h index b1bae78ddc5fa27ab60d3065a9c2e1455113dfa0..71fadfae7deb822d5997491e2eaf8b413a8647fc 100644 --- a/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h +++ b/paddle/pten/kernels/impl/matmul_grad_kernel_impl.h @@ -135,7 +135,7 @@ static DDim RowMatrixFromVector(const DDim& x_dim) { if (x_dim.size() > 1) { return x_dim; } - return paddle::framework::make_ddim({1, x_dim[0]}); + return pten::framework::make_ddim({1, x_dim[0]}); } /** @@ -146,7 +146,7 @@ static DDim ColumnMatrixFromVector(const DDim& y_dim) { if (y_dim.size() > 1) { return y_dim; } - return paddle::framework::make_ddim({y_dim[0], 1}); + return pten::framework::make_ddim({y_dim[0], 1}); } /** diff --git a/paddle/pten/kernels/impl/matmul_kernel_impl.h b/paddle/pten/kernels/impl/matmul_kernel_impl.h index 5ea9729655ecc8b181a513df05e5b155a39f61cd..afe6bf71e2f6b453031863ff1ac9d67f32f79e65 100644 --- a/paddle/pten/kernels/impl/matmul_kernel_impl.h +++ b/paddle/pten/kernels/impl/matmul_kernel_impl.h @@ -164,7 +164,7 @@ void MatMulFunction(const Context& dev_ctx, 
std::copy_n(y_dims.cbegin(), y_ndim - 2, out_dims.begin()); out_dims.back() = y_dims.back(); } - Out->ResizeAndAllocate(paddle::framework::make_ddim(out_dims)); + Out->ResizeAndAllocate(pten::framework::make_ddim(out_dims)); Out->mutable_data<T>(); if (trans_y) { const int M = Y.numel() / N; @@ -242,7 +242,7 @@ void MatMulFunction(const Context& dev_ctx, } else { std::copy_n(x_dims.cbegin(), x_ndim - 1, out_dims.begin()); } - Out->ResizeAndAllocate(paddle::framework::make_ddim(out_dims)); + Out->ResizeAndAllocate(pten::framework::make_ddim(out_dims)); Out->mutable_data<T>(); if (trans_x) { @@ -330,7 +330,7 @@ void MatMulFunction(const Context& dev_ctx, out_broadcast_dims[ndim - 2] = M; out_broadcast_dims[ndim - 1] = N; - Out->ResizeAndAllocate(paddle::framework::make_ddim(out_broadcast_dims)); + Out->ResizeAndAllocate(pten::framework::make_ddim(out_broadcast_dims)); Out->mutable_data<T>(); const int batch_dim = ndim - 2; @@ -493,12 +493,12 @@ void MatmulKernel(const Context& dev_ctx, bool transpose_x, bool transpose_y, DenseTensor* out) { - PADDLE_ENFORCE_NE(paddle::framework::product(x.dims()), + PADDLE_ENFORCE_NE(pten::framework::product(x.dims()), 0, paddle::platform::errors::InvalidArgument( "The Input(X) dims size must not be equal to 0," " but received dims size is 0.")); - PADDLE_ENFORCE_NE(paddle::framework::product(y.dims()), + PADDLE_ENFORCE_NE(pten::framework::product(y.dims()), 0, paddle::platform::errors::InvalidArgument( "The Input(Y) dims size must not be equal to 0," " but received dims size is 0.")); diff --git a/paddle/pten/tests/api/test_cast_api.cc b/paddle/pten/tests/api/test_cast_api.cc index 6608d1ed08cab5b8c2db9a0197bdd9a10ba96d4c..0a3b56e3f18d4dfccfdf5f56e5b690fffcd33ddc 100644 --- a/paddle/pten/tests/api/test_cast_api.cc +++ b/paddle/pten/tests/api/test_cast_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, cast) { diff --git a/paddle/pten/tests/api/test_conj_api.cc b/paddle/pten/tests/api/test_conj_api.cc index 50d190257a16dc4d7a605f0e4d3662b5f45a6dfd..c17b0f23f4f6b7751009b69b7a504570c1f70a9d 100644 --- a/paddle/pten/tests/api/test_conj_api.cc +++ b/paddle/pten/tests/api/test_conj_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, conj) { diff --git a/paddle/pten/tests/api/test_dot_api.cc b/paddle/pten/tests/api/test_dot_api.cc index 40e709b960334b034f3aa46ef72ddeb02436b5fb..97616d0cbcd57750c5e8ee3464fb167313713fc8 100644 --- a/paddle/pten/tests/api/test_dot_api.cc +++ b/paddle/pten/tests/api/test_dot_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, dot) { diff --git a/paddle/pten/tests/api/test_elementwise_api.cc b/paddle/pten/tests/api/test_elementwise_api.cc index 69af32eb457a6ea7ef5bbbb80c33fd2276febb7b..17a6ffde9df0abf5be8333b21b2cef432ce89b1f 100644 --- a/paddle/pten/tests/api/test_elementwise_api.cc +++ b/paddle/pten/tests/api/test_elementwise_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim =
paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, add) { diff --git a/paddle/pten/tests/api/test_empty_api.cc b/paddle/pten/tests/api/test_empty_api.cc index f4e3f472c7990de4829017f50fb0530aeb4a62db..f38e91b02b7051800820d9547a4cf68ade5cc67d 100644 --- a/paddle/pten/tests/api/test_empty_api.cc +++ b/paddle/pten/tests/api/test_empty_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, empty_like) { diff --git a/paddle/pten/tests/api/test_fill_api.cc b/paddle/pten/tests/api/test_fill_api.cc index 0d823765680e8a05c0e054a9b0a44deedd1b58c0..7910cc840f5efdee10406d81bcbda1385e4eb39c 100644 --- a/paddle/pten/tests/api/test_fill_api.cc +++ b/paddle/pten/tests/api/test_fill_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, full_like) { diff --git a/paddle/pten/tests/api/test_flatten_api.cc b/paddle/pten/tests/api/test_flatten_api.cc index 6c082b9653e6f9b0ab0b09016938b9b3bfe65159..cf8fa9cb1895fb9d7f04059ae979050a280a40fa 100644 --- a/paddle/pten/tests/api/test_flatten_api.cc +++ b/paddle/pten/tests/api/test_flatten_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, flatten) { diff --git a/paddle/pten/tests/api/test_matmul_api.cc b/paddle/pten/tests/api/test_matmul_api.cc index 03f686f1c3f5e68e421b6068baab5d320763c198..08e0e888b99edd86dd4b05c7998c90596e646881 100644 --- a/paddle/pten/tests/api/test_matmul_api.cc +++ b/paddle/pten/tests/api/test_matmul_api.cc @@ -26,7 +26,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(API, matmul_cpu) { // 1. 
create tensor diff --git a/paddle/pten/tests/api/test_mean_api.cc b/paddle/pten/tests/api/test_mean_api.cc index 9d90e58101cbd8c4f5d710728ffc1c660226822d..a7b85cff12cc1cfb0070a7527d653cf42807dbe5 100644 --- a/paddle/pten/tests/api/test_mean_api.cc +++ b/paddle/pten/tests/api/test_mean_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, mean) { diff --git a/paddle/pten/tests/api/test_reshape_api.cc b/paddle/pten/tests/api/test_reshape_api.cc index 59e9e9fab1122ce4a448281937a9f9a944cba37c..bfd1ea841443f2940d4cbf4e8a0cb2ead2decbd8 100644 --- a/paddle/pten/tests/api/test_reshape_api.cc +++ b/paddle/pten/tests/api/test_reshape_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, reshape) { diff --git a/paddle/pten/tests/api/test_scale_api.cc b/paddle/pten/tests/api/test_scale_api.cc index 5ad52142765ba3b1faf9876d7707d75e3b7615b3..bb5523d26c4e19974d404efa85bc35e79ffaab58 100644 --- a/paddle/pten/tests/api/test_scale_api.cc +++ b/paddle/pten/tests/api/test_scale_api.cc @@ -24,7 +24,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; void CheckScaleResult(experimental::Tensor* out) { ASSERT_EQ(out->dims().size(), 2); diff --git a/paddle/pten/tests/api/test_sum_api.cc b/paddle/pten/tests/api/test_sum_api.cc index 5a7c9840e11143f71ffae811c9dbfd94306d5f73..c0d5a89eeb7447d84516988ec8f34422c162267b 100644 --- a/paddle/pten/tests/api/test_sum_api.cc +++ b/paddle/pten/tests/api/test_sum_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(API, sum) { diff --git a/paddle/pten/tests/api/test_to_api.cc b/paddle/pten/tests/api/test_to_api.cc index 9aef716029a692d9c281674a5ef9f188ef3e5c74..fa999aace66784ac117a183335f45ac7585cbfbb 100644 --- a/paddle/pten/tests/api/test_to_api.cc +++ b/paddle/pten/tests/api/test_to_api.cc @@ -25,7 +25,7 @@ namespace paddle { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; paddle::experimental::Tensor CreateInputTensor() { const auto alloc = std::make_unique( diff --git a/paddle/pten/tests/kernels/test_cast_dev_api.cc b/paddle/pten/tests/kernels/test_cast_dev_api.cc index 80328d0b243e8d74a6d5aa804502c7da6b287db2..3b1412a8e5f4e368e0776a7dbca1f6a47eac8ec3 100644 --- a/paddle/pten/tests/kernels/test_cast_dev_api.cc +++ b/paddle/pten/tests/kernels/test_cast_dev_api.cc @@ -28,7 +28,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, cast) { // 1. 
create tensor diff --git a/paddle/pten/tests/kernels/test_conj_dev_api.cc b/paddle/pten/tests/kernels/test_conj_dev_api.cc index 6f2ea0602b81d21d62d33ef8677d36c9c2814364..51066d8ae478397378bf5986edf2c0704ae7e005 100644 --- a/paddle/pten/tests/kernels/test_conj_dev_api.cc +++ b/paddle/pten/tests/kernels/test_conj_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, conj) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_copy_dev_api.cc b/paddle/pten/tests/kernels/test_copy_dev_api.cc index d690b29d71f6fd1878bb28436727959bf16ad298..4f8bd727716cef9641d33cc8603ac81631bc24e4 100644 --- a/paddle/pten/tests/kernels/test_copy_dev_api.cc +++ b/paddle/pten/tests/kernels/test_copy_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(YuanRisheng): This TEST file need to be refactored after 'copy' realized // in 'paddle/api' diff --git a/paddle/pten/tests/kernels/test_creation_dev_api.cc b/paddle/pten/tests/kernels/test_creation_dev_api.cc index b1c23d4a768e6145de72b585e5d03b2901c7b577..1aa21b847fac4500c23b67f1fe9adb20331bc382 100644 --- a/paddle/pten/tests/kernels/test_creation_dev_api.cc +++ b/paddle/pten/tests/kernels/test_creation_dev_api.cc @@ -27,7 +27,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, empty) { // 1. create input diff --git a/paddle/pten/tests/kernels/test_dot_dev_api.cc b/paddle/pten/tests/kernels/test_dot_dev_api.cc index 4213240f57ba8d4182f46c8af6c8c088bcbb0634..e4978d84c835cfaab55ad2b9b354d79872cccd79 100644 --- a/paddle/pten/tests/kernels/test_dot_dev_api.cc +++ b/paddle/pten/tests/kernels/test_dot_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, dot) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc index 23583a843561b6dd21ce785e33e2a2c4586c41ca..0bc16371c0731e3f3446c4f52de3b8897f1d742a 100644 --- a/paddle/pten/tests/kernels/test_elementwise_dev_api.cc +++ b/paddle/pten/tests/kernels/test_elementwise_dev_api.cc @@ -26,7 +26,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, add) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_flatten_dev_api.cc b/paddle/pten/tests/kernels/test_flatten_dev_api.cc index 13fc327b669452620d63f5da937a33fdfa588301..78cd6261c3a41df1edbd9b8d8cc723f4fadcf0c0 100644 --- a/paddle/pten/tests/kernels/test_flatten_dev_api.cc +++ b/paddle/pten/tests/kernels/test_flatten_dev_api.cc @@ -36,7 +36,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, flatten) { // 1. 
create tensor diff --git a/paddle/pten/tests/kernels/test_matmul_dev_api.cc b/paddle/pten/tests/kernels/test_matmul_dev_api.cc index 118215db505d53dbdcdfd4bc69d3e01ab98d3c43..76f775031921097d74d5ced0c34db8c45290d701 100644 --- a/paddle/pten/tests/kernels/test_matmul_dev_api.cc +++ b/paddle/pten/tests/kernels/test_matmul_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, dot) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_mean_dev_api.cc b/paddle/pten/tests/kernels/test_mean_dev_api.cc index a8860540fd0c9b5a5fdccba3f0957a2c2eb04a20..07ec30afad5ca92a62a95c37e6afd4bb9639dc04 100644 --- a/paddle/pten/tests/kernels/test_mean_dev_api.cc +++ b/paddle/pten/tests/kernels/test_mean_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, mean) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_reshape_dev_api.cc b/paddle/pten/tests/kernels/test_reshape_dev_api.cc index 52038593d7012d277247caf666843078e1abb8e0..dc90043305ca022347fa611ad08fd4a0bc2c79dd 100644 --- a/paddle/pten/tests/kernels/test_reshape_dev_api.cc +++ b/paddle/pten/tests/kernels/test_reshape_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; // TODO(chenweihang): Remove this test after the API is used in the dygraph TEST(DEV_API, reshape) { diff --git a/paddle/pten/tests/kernels/test_scale_dev_api.cc b/paddle/pten/tests/kernels/test_scale_dev_api.cc index 1c0be6c06aacdf52634d7c2801591c9ae7a94bed..106835a204c65c3ae3f48aad512635bf1a1a9d6e 100644 --- a/paddle/pten/tests/kernels/test_scale_dev_api.cc +++ b/paddle/pten/tests/kernels/test_scale_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, scale) { // 1. create tensor diff --git a/paddle/pten/tests/kernels/test_sum_dev_api.cc b/paddle/pten/tests/kernels/test_sum_dev_api.cc index 2b11ba9595c53d7abd755449bb0b41a9cf4af435..41d694a025f42e54a6dd347476deca7ba921c64c 100644 --- a/paddle/pten/tests/kernels/test_sum_dev_api.cc +++ b/paddle/pten/tests/kernels/test_sum_dev_api.cc @@ -25,7 +25,7 @@ namespace pten { namespace tests { namespace framework = paddle::framework; -using DDim = paddle::framework::DDim; +using DDim = pten::framework::DDim; TEST(DEV_API, sum) { // 1. create tensor
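The remaining hunks are the same mechanical substitution across the test suites: every paddle::framework::DDim alias becomes pten::framework::DDim. As a hedged sketch of what a typical call site looks like once the rename is in place (MakeDemoMeta is our illustrative helper; it assumes DenseTensorMeta keeps the (dtype, dims, layout) constructor that its uses elsewhere in this diff suggest):

#include "paddle/pten/core/ddim.h"
#include "paddle/pten/core/tensor_meta.h"

// Illustrative helper, not part of the PR: builds the meta data for a
// 2x3 float32 tensor using the relocated make_ddim.
pten::DenseTensorMeta MakeDemoMeta() {
  return pten::DenseTensorMeta(pten::DataType::FLOAT32,
                               pten::framework::make_ddim({2, 3}),
                               pten::DataLayout::NCHW);
}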