diff --git a/paddle/pten/kernels/complex_kernel.h b/paddle/pten/kernels/complex_kernel.h
index b57e6d0fb4e0943afdac499ada06aeefcdac4a29..dfe8fff43e6efa5080cbd57c5f3816fb3c0e9c2d 100644
--- a/paddle/pten/kernels/complex_kernel.h
+++ b/paddle/pten/kernels/complex_kernel.h
@@ -18,7 +18,7 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Conj(const ContextT& dev_ctx, const DenseTensor& x, DenseTensor* out);
+template <typename T, typename Context>
+void Conj(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);
 
 }  // namespace pten
diff --git a/paddle/pten/kernels/copy_kernel.h b/paddle/pten/kernels/copy_kernel.h
index d095d18a371f0913cafa793fae7392326b39934e..a481908892e9b7d03f5b0e87051122218532dcef 100644
--- a/paddle/pten/kernels/copy_kernel.h
+++ b/paddle/pten/kernels/copy_kernel.h
@@ -18,8 +18,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename ContextT>
-void Copy(const ContextT& dev_ctx,
+template <typename Context>
+void Copy(const Context& dev_ctx,
           const DenseTensor& src,
           bool blocking,
           DenseTensor* dst);
diff --git a/paddle/pten/kernels/cpu/copy_kernel.cc b/paddle/pten/kernels/cpu/copy_kernel.cc
index 6a81579eb4f0333c78df169ea82a52cec1a44eed..f3c4156fcddf0fcdc507b2e69da0477ccf42efcc 100644
--- a/paddle/pten/kernels/cpu/copy_kernel.cc
+++ b/paddle/pten/kernels/cpu/copy_kernel.cc
@@ -25,8 +25,8 @@ limitations under the License. */
 namespace pten {
 
 // NOTE(chenweihang): blocking is useless in cpu kernel
-template <typename ContextT>
-void Copy(const ContextT& dev_ctx,
+template <typename Context>
+void Copy(const Context& dev_ctx,
           const DenseTensor& src,
           bool blocking,
           DenseTensor* dst) {
diff --git a/paddle/pten/kernels/cpu/dot_kernel.cc b/paddle/pten/kernels/cpu/dot_kernel.cc
index c485cc8ac5672ae5c258fefce78743fe9db06abc..247ad1216a266005fc770c7941609cfff796e6b3 100644
--- a/paddle/pten/kernels/cpu/dot_kernel.cc
+++ b/paddle/pten/kernels/cpu/dot_kernel.cc
@@ -22,8 +22,8 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Dot(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Dot(const Context& dev_ctx,
          const DenseTensor& x,
         const DenseTensor& y,
         DenseTensor* out) {
diff --git a/paddle/pten/kernels/dot_kernel.h b/paddle/pten/kernels/dot_kernel.h
index f6db41cbbe238b5c12c9423eafebb372428f63b9..9924749cd21419e864a0defa8b6caa43179c834a 100644
--- a/paddle/pten/kernels/dot_kernel.h
+++ b/paddle/pten/kernels/dot_kernel.h
@@ -18,8 +18,8 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Dot(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Dot(const Context& dev_ctx,
          const DenseTensor& x,
         const DenseTensor& y,
         DenseTensor* out);
diff --git a/paddle/pten/kernels/flatten_kernel.cc b/paddle/pten/kernels/flatten_kernel.cc
index 94605741708dffe08936fdf54dc84fc84011e23e..df8238cbf3a91fb8cf878d252512b1b6df2cab92 100644
--- a/paddle/pten/kernels/flatten_kernel.cc
+++ b/paddle/pten/kernels/flatten_kernel.cc
@@ -21,8 +21,8 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Flatten(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Flatten(const Context& dev_ctx,
              const DenseTensor& x,
             int start_axis,
             int stop_axis,
@@ -35,14 +35,14 @@ void Flatten(const ContextT& dev_ctx,
 
 // TODO(yuanrisheng): this kernel is for training and xshape is a Intermediate
 // Output Tensor,
 // is there a more flexible way to deal with this case?
-template <typename T, typename ContextT>
-void FlattenWithXShape(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void FlattenWithXShape(const Context& dev_ctx,
                        const DenseTensor& x,
                        int start_axis,
                        int stop_axis,
                        DenseTensor* out,
                        DenseTensor* xshape) {
-  Flatten<T, ContextT>(dev_ctx, x, start_axis, stop_axis, out);
+  Flatten<T, Context>(dev_ctx, x, start_axis, stop_axis, out);
   funcs::SetXShape(x, xshape);
 }
diff --git a/paddle/pten/kernels/flatten_kernel.h b/paddle/pten/kernels/flatten_kernel.h
index 6ce0a2be20a9d2dab332c5b0edecfeb9dd85acb9..5a0445489bcf38cd0e8ee2e7474682b34dd7dd53 100644
--- a/paddle/pten/kernels/flatten_kernel.h
+++ b/paddle/pten/kernels/flatten_kernel.h
@@ -18,15 +18,15 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Flatten(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Flatten(const Context& dev_ctx,
              const DenseTensor& x,
             int start_axis,
             int stop_axis,
             DenseTensor* out);
 
-template <typename T, typename ContextT>
-void FlattenWithXShape(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void FlattenWithXShape(const Context& dev_ctx,
                        const DenseTensor& x,
                        int start_axis,
                        int stop_axis,
diff --git a/paddle/pten/kernels/full_kernel.h b/paddle/pten/kernels/full_kernel.h
index d1139cf9ecefebf30d8c7fcdc1f3c23670c7f717..5bf6e37c36e576594096b8d5cadec63f4e514621 100644
--- a/paddle/pten/kernels/full_kernel.h
+++ b/paddle/pten/kernels/full_kernel.h
@@ -20,13 +20,13 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Full(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Full(const Context& dev_ctx,
           const ScalarArray& shape,
           const Scalar& val,
           DenseTensor* out);
 
-template <typename T, typename ContextT>
-void FullLike(const ContextT& dev_ctx, const Scalar& val, DenseTensor* out);
+template <typename T, typename Context>
+void FullLike(const Context& dev_ctx, const Scalar& val, DenseTensor* out);
 
 }  // namespace pten
diff --git a/paddle/pten/kernels/gpu/copy_kernel.cu b/paddle/pten/kernels/gpu/copy_kernel.cu
index cb9f8054d3b0f4f13251299c4f0f0c921dba4b06..877a06ce33e5d3ae71b146bba07a98d8a8116e4c 100644
--- a/paddle/pten/kernels/gpu/copy_kernel.cu
+++ b/paddle/pten/kernels/gpu/copy_kernel.cu
@@ -24,8 +24,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename ContextT>
-void Copy(const ContextT& dev_ctx,
+template <typename Context>
+void Copy(const Context& dev_ctx,
           const DenseTensor& src,
           bool blocking,
           DenseTensor* dst) {
diff --git a/paddle/pten/kernels/gpu/dot_kernel.cu b/paddle/pten/kernels/gpu/dot_kernel.cu
index 7742e57a026539d9169ff7bfe991c50b76ea4b23..6b66d45b7dd48ec0376b744387ebf5e6a94920b5 100644
--- a/paddle/pten/kernels/gpu/dot_kernel.cu
+++ b/paddle/pten/kernels/gpu/dot_kernel.cu
@@ -24,8 +24,8 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Dot(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Dot(const Context& dev_ctx,
          const DenseTensor& x,
         const DenseTensor& y,
         DenseTensor* out) {
diff --git a/paddle/pten/kernels/impl/complex_kernel_impl.h b/paddle/pten/kernels/impl/complex_kernel_impl.h
index 7b5cabd6806e20a1f5c159e01b8aa14f3c25bc86..6f3a6049faa9a75915814c146bda032eeef71a41 100644
--- a/paddle/pten/kernels/impl/complex_kernel_impl.h
+++ b/paddle/pten/kernels/impl/complex_kernel_impl.h
@@ -20,13 +20,13 @@
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Conj(const ContextT& dev_ctx, const DenseTensor& x, DenseTensor* out) {
+template <typename T, typename Context>
+void Conj(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
   auto numel = x.numel();
   auto* x_data = x.data<T>();
   auto* out_data = out->mutable_data<T>();
 
-  paddle::platform::ForRange<ContextT> for_range(dev_ctx, numel);
+  paddle::platform::ForRange<Context> for_range(dev_ctx, numel);
   paddle::operators::math::ConjFunctor<T> functor(x_data, numel, out_data);
   for_range(functor);
 }
diff --git a/paddle/pten/kernels/impl/full_kernel_impl.h b/paddle/pten/kernels/impl/full_kernel_impl.h
index 7076bb51b36212c43ef27b66e797335346a42fb8..c77b7a7077824fbe44d5730835136fba6b7f929f 100644
--- a/paddle/pten/kernels/impl/full_kernel_impl.h
+++ b/paddle/pten/kernels/impl/full_kernel_impl.h
@@ -24,24 +24,24 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename DeviceContext, typename T, typename VType>
-void fill_(const DeviceContext& context, DenseTensor* tensor, VType val) {
+template <typename Context, typename T, typename VType>
+void FullValue(const Context& dev_ctx, DenseTensor* tensor, VType val) {
   tensor->mutable_data<T>();
   auto t = pten::EigenVector<T>::Flatten(*tensor);
-  t.device(*context.eigen_device()) = t.constant(static_cast<T>(val));
+  t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(val));
 }
 
-template <typename T, typename ContextT>
-void Full(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Full(const Context& dev_ctx,
           const ScalarArray& shape,
           const Scalar& val,
           DenseTensor* out) {
   out->Resize(paddle::framework::make_ddim(shape.GetData()));
-  fill_<ContextT, T>(dev_ctx, out, val.to<T>());
+  FullValue<Context, T>(dev_ctx, out, val.to<T>());
 }
 
-template <typename T, typename ContextT>
-void FullLike(const ContextT& dev_ctx, const Scalar& val, DenseTensor* out) {
+template <typename T, typename Context>
+void FullLike(const Context& dev_ctx, const Scalar& val, DenseTensor* out) {
   auto value = val.to<float>();
   using CommonType = typename std::common_type<
       float,
@@ -66,7 +66,7 @@ void FullLike(const ContextT& dev_ctx, const Scalar& val, DenseTensor* out) {
           static_cast<CommonType>(std::numeric_limits<T>::lowest()),
           static_cast<CommonType>(std::numeric_limits<T>::max()),
           static_cast<float>(value)));
-  fill_<ContextT, T>(dev_ctx, out, value);
+  FullValue<Context, T>(dev_ctx, out, value);
 }
 
 }  // namespace pten
diff --git a/paddle/pten/kernels/impl/scale_kernel_impl.h b/paddle/pten/kernels/impl/scale_kernel_impl.h
index 421bb9f7b004251a5110ada09bad6107d5a86259..937b3115e63b37003d34cca35aa8b6c949fcfdc6 100644
--- a/paddle/pten/kernels/impl/scale_kernel_impl.h
+++ b/paddle/pten/kernels/impl/scale_kernel_impl.h
@@ -23,8 +23,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Scale(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Scale(const Context& dev_ctx,
            const DenseTensor& x,
           const Scalar& scale,
           float bias,
diff --git a/paddle/pten/kernels/impl/sign_kernel_impl.h b/paddle/pten/kernels/impl/sign_kernel_impl.h
index 088690ec648082a73b81ed9c0e8469a271308636..d663808f03792a40aaf84756777958c50387f8cf 100644
--- a/paddle/pten/kernels/impl/sign_kernel_impl.h
+++ b/paddle/pten/kernels/impl/sign_kernel_impl.h
@@ -22,8 +22,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Sign(const ContextT& dev_ctx, const DenseTensor& x, DenseTensor* out) {
+template <typename T, typename Context>
+void Sign(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
   out->mutable_data<T>();
   auto eigen_out = pten::EigenVector<T>::Flatten(*out);
   auto eigen_x = pten::EigenVector<T>::Flatten(x);
diff --git a/paddle/pten/kernels/reshape_kernel.cc b/paddle/pten/kernels/reshape_kernel.cc
index ea1f03a017ac91cb2caf7b91a57a8ea905258b99..0535ea20c8cb0878914b679f5025e6bc63b50c44 100644
--- a/paddle/pten/kernels/reshape_kernel.cc
+++ b/paddle/pten/kernels/reshape_kernel.cc
@@ -21,8 +21,8 @@
 
 namespace pten {
 
-template <typename ContextT>
-void Reshape(const ContextT& dev_ctx,
+template <typename Context>
+void Reshape(const Context& dev_ctx,
              const DenseTensor& x,
             const ScalarArray& shape,
             DenseTensor* out) {
@@ -36,8 +36,8 @@ void Reshape(const ContextT& dev_ctx,
   out->ResetLoD(x.lod());
 }
 
-template <typename ContextT>
-void ReshapeWithXShape(const ContextT& dev_ctx,
+template <typename Context>
+void ReshapeWithXShape(const Context& dev_ctx,
                        const DenseTensor& x,
                        const ScalarArray& shape,
                        DenseTensor* xshape,
diff --git a/paddle/pten/kernels/reshape_kernel.h b/paddle/pten/kernels/reshape_kernel.h
index d9ccd0449bff0206d45a9a130c55e4c3e1b08597..b10e31a434c008ebbdb458a95fabb2f0892405c1 100644
--- a/paddle/pten/kernels/reshape_kernel.h
+++ b/paddle/pten/kernels/reshape_kernel.h
@@ -19,14 +19,14 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename ContextT>
-void Reshape(const ContextT& dev_ctx,
+template <typename Context>
+void Reshape(const Context& dev_ctx,
              const DenseTensor& x,
             const ScalarArray& shape,
             DenseTensor* out);
 
-template <typename ContextT>
-void ReshapeWithXShape(const ContextT& dev_ctx,
+template <typename Context>
+void ReshapeWithXShape(const Context& dev_ctx,
                        const DenseTensor& x,
                        const ScalarArray& shape,
                        DenseTensor* xshape,
diff --git a/paddle/pten/kernels/scale_kernel.h b/paddle/pten/kernels/scale_kernel.h
index bb3c1968fce9e4487d3eeb2295995d1493cc6467..5908050029c7ae683b9f64074f9f54022bd994ce 100644
--- a/paddle/pten/kernels/scale_kernel.h
+++ b/paddle/pten/kernels/scale_kernel.h
@@ -19,8 +19,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Scale(const ContextT& dev_ctx,
+template <typename T, typename Context>
+void Scale(const Context& dev_ctx,
            const DenseTensor& x,
           const Scalar& scale,
           float bias,
diff --git a/paddle/pten/kernels/sign_kernel.h b/paddle/pten/kernels/sign_kernel.h
index bbb3f45c9a2c1cc61cfaf3d5f789e401a5f729c8..2cf5ca973f0936ed1a006e02b720fdb76da9e7e5 100644
--- a/paddle/pten/kernels/sign_kernel.h
+++ b/paddle/pten/kernels/sign_kernel.h
@@ -18,7 +18,7 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename T, typename ContextT>
-void Sign(const ContextT& dev_ctx, const DenseTensor& x, DenseTensor* out);
+template <typename T, typename Context>
+void Sign(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);
 
 }  // namespace pten
diff --git a/paddle/pten/kernels/xpu/copy_kernel.cc b/paddle/pten/kernels/xpu/copy_kernel.cc
index 479ef50836622614ec7bde9888acdda059266b96..190eb39e22ecd029ae41ac8a6cb4fa5513f3e00f 100644
--- a/paddle/pten/kernels/xpu/copy_kernel.cc
+++ b/paddle/pten/kernels/xpu/copy_kernel.cc
@@ -24,8 +24,8 @@ limitations under the License. */
 
 namespace pten {
 
-template <typename ContextT>
-void Copy(const ContextT& dev_ctx,
+template <typename Context>
+void Copy(const Context& dev_ctx,
           const DenseTensor& src,
           bool blocking,
           DenseTensor* dst) {
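
For reference, the convention this patch settles on is: a pten kernel is a free function templated as "template <typename T, typename Context>" (or "template <typename Context>" for dtype-agnostic kernels such as Copy and Reshape), and it always takes the device context as its first argument, named dev_ctx. The sketch below is not part of the patch; the kernel names and the include path are assumptions, and only the template-parameter naming and the dev_ctx-first signatures come from the diff above.

// Hypothetical header written against the convention introduced by this patch.
#include "paddle/pten/core/dense_tensor.h"

namespace pten {

// Dtype-dependent kernel: T first, then Context, dev_ctx as the first parameter.
template <typename T, typename Context>
void MyUnaryKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);

// Dtype-agnostic kernel (same shape as Copy/Reshape above): Context only.
template <typename Context>
void MyLayoutKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);

}  // namespace pten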