Unverified · Commit 7e0292ea authored by chentianyu03, committed by GitHub

[pten]Remove pten/include dir files (#38878)

* move dot dev api into dot_kernel.h

* add infermeta header

* modify to dot kernel in dot_op.h

* move conj dev api into complex_kernel.h

* move sign dev api into sign_kernel.h

* move scale dev api into scale_kernel.h and remove infermeta.h

* rm paddle/pten/include/math.h

* rm paddle/pten/include/math.h

* rm include dir

* rm paddle/pten/include/math.h

* fix conflict with develop branch

* rm DeviceContext in conj_op.h

* add the missing complex_kernel header
Parent 53783e1e
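For call sites outside this diff, the change boils down to an include-path and symbol rename: the aggregated developer headers under paddle/pten/include/ are gone, and code now includes the concrete kernel header and calls the *Kernel entry point. A minimal sketch of the pattern, using a hypothetical helper and an assumed pten::CPUContext (this snippet is illustrative and not part of the commit):

// Illustration of the migration pattern only; not a file in this commit.
// Before: #include "paddle/pten/include/math.h" and call pten::Scale<T>(...).
// After:  include the specific kernel header and call pten::ScaleKernel<T>(...).
#include "paddle/pten/backends/cpu/cpu_context.h"  // assumed location of pten::CPUContext
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/kernels/scale_kernel.h"

// Writes 2 * x into a caller-provided output tensor (out's meta is assumed to be set).
void ScaleByTwo(const pten::CPUContext& dev_ctx,
                const pten::DenseTensor& x,
                pten::DenseTensor* out) {
  pten::ScaleKernel<float>(dev_ctx, x, /*scale=*/pten::Scalar(2.0f),
                           /*bias=*/0.0f, /*bias_after_scale=*/true, out);
}

The hunks below apply this pattern across the fluid operators, the eager API, the pybind bindings, and the pten kernels themselves.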
@@ -18,7 +18,6 @@
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
......
@@ -28,7 +28,6 @@
 #include "paddle/fluid/platform/float16.h"
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/core/convert_utils.h"
-#include "paddle/pten/include/core.h"
 #include "unsupported/Eigen/CXX11/Tensor"
 #ifdef PADDLE_WITH_XPU
 #include "xpu/refactor/math.h"
......
@@ -16,7 +16,7 @@
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/eager_tensor.h"
-#include "paddle/pten/api/all.h"
+#include "paddle/pten/kernels/scale_kernel.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
@@ -33,28 +33,28 @@ static void ScaleDeviceDispatch(const pten::DenseTensor& dense_tensor,
                                 pten::DenseTensor* dense_out) {
   switch (dense_tensor.dtype()) {
     case pten::DataType::FLOAT64: {
-      pten::Scale<double, DeviceContext>(
+      pten::ScaleKernel<double, DeviceContext>(
           dev_ctx, dense_tensor /* tensor */, scale /* scale */,
           bias /* bias */, bias_after_scale /* bias_after_scale */,
           dense_out /* out tensor */);
       break;
     }
     case pten::DataType::FLOAT32: {
-      pten::Scale<float, DeviceContext>(dev_ctx, dense_tensor /* tensor */,
-                                        scale /* scale */, bias /* bias */,
-                                        bias_after_scale /* bias_after_scale */,
-                                        dense_out /* out tensor */);
+      pten::ScaleKernel<float, DeviceContext>(
+          dev_ctx, dense_tensor /* tensor */, scale /* scale */,
+          bias /* bias */, bias_after_scale /* bias_after_scale */,
+          dense_out /* out tensor */);
       break;
     }
     case pten::DataType::INT64: {
-      pten::Scale<int64_t, DeviceContext>(
+      pten::ScaleKernel<int64_t, DeviceContext>(
          dev_ctx, dense_tensor /* tensor */, scale /* scale */,
          bias /* bias */, bias_after_scale /* bias_after_scale */,
          dense_out /* out tensor */);
       break;
     }
     case pten::DataType::INT32: {
-      pten::Scale<int32_t, DeviceContext>(
+      pten::ScaleKernel<int32_t, DeviceContext>(
          dev_ctx, dense_tensor /* tensor */, scale /* scale */,
          bias /* bias */, bias_after_scale /* bias_after_scale */,
          dense_out /* out tensor */);
......
@@ -29,7 +29,6 @@
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/pten/api/all.h"
-#include "paddle/pten/include/core.h"
 namespace egr {
......
@@ -18,7 +18,6 @@
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/framework/variable.h"
 // pten deps
-#include "paddle/pten/all.h"
 #include "paddle/pten/api/all.h"
 #include "paddle/pten/api/lib/api_declare.h"
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
......
@@ -16,7 +16,6 @@
 #include "paddle/fluid/eager/eager_tensor.h"
 #include "paddle/pten/api/all.h"
-#include "paddle/pten/include/core.h"
 namespace egr {
 /**
......
@@ -26,7 +26,6 @@
 #include "paddle/fluid/framework/var_type_inference.h"
 #include "paddle/fluid/framework/var_type_traits.h"
 #include "paddle/pten/api/all.h"
-#include "paddle/pten/include/core.h"
 namespace egr {
 namespace legacy {
......
@@ -25,8 +25,6 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/type_defs.h"
-#include "paddle/pten/include/core.h"
 DECLARE_bool(use_mkldnn);
 namespace paddle {
......
@@ -17,7 +17,6 @@
 #include <vector>
 #include "paddle/fluid/eager/eager_tensor.h"
 #include "paddle/pten/api/all.h"
-#include "paddle/pten/include/core.h"
 namespace egr {
 namespace legacy {
......
@@ -24,7 +24,6 @@ limitations under the License. */
 #include "paddle/fluid/platform/init.h"
 #include "paddle/fluid/framework/pten_utils.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace framework {
......
@@ -41,7 +41,8 @@ limitations under the License. */
 #include "paddle/utils/flat_hash_map.h"
 #include "paddle/pten/core/arg_map_context.h"
-#include "paddle/pten/include/core.h"
+#include "paddle/pten/core/kernel_context.h"
+#include "paddle/pten/core/kernel_factory.h"
 namespace paddle {
 namespace framework {
......
@@ -37,7 +37,6 @@
 #include "paddle/fluid/imperative/variable_wrapper.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/macros.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace framework {
 class Variable;
......
@@ -25,7 +25,6 @@
 #include "paddle/fluid/imperative/type_defs.h"
 #include "paddle/fluid/imperative/variable_wrapper.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace imperative {
......
@@ -27,8 +27,6 @@
 #include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/imperative/type_defs.h"
-#include "paddle/pten/include/core.h"
 DECLARE_bool(use_mkldnn);
 namespace paddle {
......
@@ -19,7 +19,6 @@ limitations under the License. */
 #include "paddle/fluid/platform/transform.h"
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/cast_kernel.h"
 namespace paddle {
......
@@ -19,7 +19,6 @@
 // only can include the headers in paddle/pten/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/complex_kernel.h"
 namespace paddle {
@@ -39,7 +38,7 @@ class ConjKernel : public framework::OpKernel<T> {
     auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);
     // call new kernel
-    pten::ConjKernel<T, DeviceContext>(dev_ctx, *pt_x.get(), pt_out.get());
+    pten::ConjKernel<T>(dev_ctx, *pt_x.get(), pt_out.get());
   }
 };
......
@@ -21,7 +21,6 @@
 // only can include the headers in paddle/pten/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/dot_grad_kernel.h"
 #include "paddle/pten/kernels/dot_kernel.h"
......
@@ -18,7 +18,6 @@ limitations under the License. */
 #include <utility>
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
-// only can include the headers in paddle/pten/include dirs
 #include "paddle/pten/kernels/math_kernel.h"
 namespace paddle {
......
@@ -18,7 +18,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
 #include "paddle/fluid/platform/cpu_info.h"
-// only can include the headers in paddle/pten/include dirs
 #include "paddle/pten/kernels/math_kernel.h"
 namespace paddle {
......
@@ -29,7 +29,6 @@ limitations under the License. */
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #include "paddle/fluid/platform/transform.h"
-// only can include the headers in paddle/pten/include dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
 #include "paddle/pten/kernels/cpu/elementwise.h"
......
@@ -22,7 +22,6 @@ limitations under the License. */
 // only can include the headers in paddle/top/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/gpu/elementwise.h"
 namespace paddle {
......
@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
-// only can include the headers in paddle/pten/include dirs
 #include "paddle/pten/kernels/math_kernel.h"
 namespace paddle {
 namespace operators {
......
@@ -19,7 +19,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/pten_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/full_kernel.h"
 namespace paddle {
......
@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/pooling.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 #include "paddle/pten/kernels/flatten_grad_kernel.h"
 #include "paddle/pten/kernels/flatten_kernel.h"
......
@@ -27,7 +27,6 @@ limitations under the License. */
 // only can include the headers in paddle/pten/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/matmul_grad_kernel.h"
 #include "paddle/pten/kernels/matmul_kernel.h"
......
@@ -26,8 +26,6 @@ limitations under the License. */
 // only can include the headers in paddle/pten/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
-#include "paddle/pten/include/math.h"
 #include "paddle/pten/kernels/cpu/reduce.h"
 #if defined(__HIPCC__) || defined(__NVCC__)
......
@@ -20,7 +20,6 @@ limitations under the License. */
 // only can include the headers in paddle/pten/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
 #include "paddle/pten/common/scalar_array.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/reshape_grad_kernel.h"
 #include "paddle/pten/kernels/reshape_kernel.h"
 namespace paddle {
......
@@ -19,7 +19,6 @@ limitations under the License. */
 // only can include the headers in paddle/top/api dirs
 #include "paddle/pten/api/lib/utils/tensor_utils.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/scale_kernel.h"
 namespace paddle {
@@ -70,8 +69,8 @@ class ScaleKernel : public framework::OpKernel<T> {
     auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);
     // call new kernel
-    pten::Scale<T>(dev_ctx, *pt_x.get(), scale, bias, bias_after_scale,
-                   pt_out.get());
+    pten::ScaleKernel<T>(dev_ctx, *pt_x.get(), scale, bias, bias_after_scale,
+                         pt_out.get());
   }
 };
......
@@ -19,7 +19,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/pten_utils.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
-#include "paddle/pten/include/core.h"
 #include "paddle/pten/kernels/sign_kernel.h"
 namespace paddle {
......
@@ -26,7 +26,6 @@ limitations under the License. */
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #pragma GCC diagnostic ignored "-Wmissing-field-initializers"
......
@@ -34,7 +34,6 @@ limitations under the License. */
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace pybind {
......
@@ -31,7 +31,6 @@ limitations under the License. */
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace pybind {
......
@@ -28,7 +28,6 @@ limitations under the License. */
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 #pragma GCC diagnostic ignored "-Wwrite-strings"
 namespace paddle {
......
@@ -26,7 +26,6 @@ limitations under the License. */
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/core.h"
 namespace paddle {
 namespace pybind {
......
@@ -29,4 +29,4 @@ get_property(pten_kernels GLOBAL PROPERTY PTEN_KERNELS)
 message(STATUS "All standard pten kernels: ${pten_kernels}")
 set(PTEN_DEPS ${PTEN_DEPS} ${pten_kernels})
-cc_library(pten SRCS all.cc DEPS ${PTEN_DEPS})
+cc_library(pten DEPS ${PTEN_DEPS})
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/all.h"
namespace pten {} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// developer apis
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/infermeta.h"
#include "paddle/pten/include/math.h"
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/pten/api/lib/kernel_dispatch.h"
 #include "paddle/pten/api/lib/utils/storage.h"
 #include "paddle/pten/core/kernel_registry.h"
-#include "paddle/pten/include/core.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/unary.h"
 PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_context.h"
#include "paddle/pten/core/kernel_factory.h"
#include "paddle/pten/core/tensor_meta.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/infermeta/binary.h"
#include "paddle/pten/infermeta/multiary.h"
#include "paddle/pten/infermeta/nullary.h"
#include "paddle/pten/infermeta/unary.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/include/infermeta.h"
#include "paddle/pten/kernels/scale_kernel.h"
namespace pten {
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
                  const DenseTensor& x,
                  const Scalar& scale,
                  float bias,
                  bool bias_after_scale) {
  auto out_meta = UnchangedInferMeta(x.meta());
  pten::DenseTensor dense_out(
      pten::make_intrusive<paddle::experimental::SharedStorage>(
          dev_ctx.GetPlace()),
      std::move(out_meta));
  Scale<T, ContextT>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
  return dense_out;
}
} // namespace pten
@@ -15,9 +15,6 @@ limitations under the License. */
 #pragma once
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/infermeta.h"
-#include "paddle/pten/kernels/empty_kernel.h"
 #include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/empty_kernel.h"
......
@@ -13,18 +13,48 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/pten/kernels/scale_kernel.h"
-#include "paddle/pten/kernels/impl/scale_kernel_impl.h"
 #include "paddle/pten/backends/cpu/cpu_context.h"
+#include "paddle/pten/common/scalar.h"
+#include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/funcs/eigen/common.h"
 // See Note [ Why still include the fluid headers? ]
+#include "paddle/fluid/operators/eigen/eigen_function.h"
 #include "paddle/fluid/platform/bfloat16.h"
+namespace pten {
+template <typename T, typename Context>
+void ScaleKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 const Scalar& scale,
+                 float bias,
+                 bool bias_after_scale,
+                 DenseTensor* out) {
+  // calc
+  out->mutable_data<T>();
+  auto eigen_out = pten::EigenVector<T>::Flatten(*out);
+  auto eigen_x = pten::EigenVector<T>::Flatten(x);
+  auto& dev = *dev_ctx.eigen_device();
+  // TODO(chenweihang): now the eigen function here need the dtype of scale,
+  // eigen_x, bias should be same, so here need cast for two scalar arg,
+  // maybe we declare that the type of scale and bias is T?
+  paddle::operators::EigenScale<std::decay_t<decltype(dev)>, T>::Eval(
+      dev,
+      eigen_out,
+      eigen_x,
+      scale.to<T>(),
+      static_cast<T>(bias),
+      bias_after_scale);
+}
+} // namespace pten
 PT_REGISTER_CTX_KERNEL(scale,
                        CPU,
                        ALL_LAYOUT,
-                       pten::Scale,
+                       pten::ScaleKernel,
                        float,
                        double,
                        paddle::platform::bfloat16,
......
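As a reminder of what both the removed pten::Scale wrapper and the new pten::ScaleKernel above compute, the scale op's long-standing semantics (restated here from Paddle's op documentation, not taken from this diff) are:

  out = scale * x + bias      // bias_after_scale == true
  out = scale * (x + bias)    // bias_after_scale == false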
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 namespace pten {
......
@@ -44,12 +44,12 @@ struct ScaleFunctor {
 };
 template <typename T, typename ContextT>
-void Scale(const ContextT& dev_ctx,
-           const DenseTensor& x,
-           const Scalar& scale,
-           float bias,
-           bool bias_after_scale,
-           DenseTensor* out) {
+void ScaleKernel(const ContextT& dev_ctx,
+                 const DenseTensor& x,
+                 const Scalar& scale,
+                 float bias,
+                 bool bias_after_scale,
+                 DenseTensor* out) {
   std::vector<const DenseTensor*> inputs;
   std::vector<DenseTensor*> outputs;
   inputs.emplace_back(&x);
@@ -67,7 +67,7 @@ void Scale(const ContextT& dev_ctx,
 PT_REGISTER_CTX_KERNEL(scale,
                        GPU,
                        ALL_LAYOUT,
-                       pten::Scale,
+                       pten::ScaleKernel,
                        float,
                        double,
                        paddle::platform::float16,
......
@@ -14,8 +14,7 @@ limitations under the License. */
 #pragma once
-// #include "paddle/pten/kernels/complex_kernel.h"
-#include "paddle/pten/include/math.h"
+#include "paddle/pten/kernels/complex_kernel.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 #include "paddle/pten/kernels/impl/dot_grad_kernel_impl.h"
 #include "paddle/pten/kernels/impl/matmul_kernel_impl.h"
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/kernels/funcs/eigen/common.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/operators/eigen/eigen_function.h"
namespace pten {
template <typename T, typename Context>
void Scale(const Context& dev_ctx,
           const DenseTensor& x,
           const Scalar& scale,
           float bias,
           bool bias_after_scale,
           DenseTensor* out) {
  // calc
  out->mutable_data<T>();
  auto eigen_out = pten::EigenVector<T>::Flatten(*out);
  auto eigen_x = pten::EigenVector<T>::Flatten(x);
  auto& dev = *dev_ctx.eigen_device();
  // TODO(chenweihang): now the eigen function here need the dtype of scale,
  // eigen_x, bias should be same, so here need cast for two scalar arg,
  // maybe we declare that the type of scale and bias is T?
  paddle::operators::EigenScale<std::decay_t<decltype(dev)>, T>::Eval(
      dev,
      eigen_out,
      eigen_x,
      scale.to<T>(),
      static_cast<T>(bias),
      bias_after_scale);
}
} // namespace pten
@@ -16,7 +16,8 @@ limitations under the License. */
 #include "paddle/pten/api/lib/utils/storage.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/binary.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 namespace pten {
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "paddle/pten/common/scalar_array.h"
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 namespace pten {
......
@@ -16,15 +16,29 @@ limitations under the License. */
 #include "paddle/pten/common/scalar.h"
 #include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/infermeta/unary.h"
+#include "paddle/pten/kernels/empty_kernel.h"
 namespace pten {
 template <typename T, typename Context>
-void Scale(const Context& dev_ctx,
-           const DenseTensor& x,
-           const Scalar& scale,
-           float bias,
-           bool bias_after_scale,
-           DenseTensor* out);
+void ScaleKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 const Scalar& scale,
+                 float bias,
+                 bool bias_after_scale,
+                 DenseTensor* out);
+template <typename T, typename ContextT>
+DenseTensor Scale(const ContextT& dev_ctx,
+                  const DenseTensor& x,
+                  const Scalar& scale,
+                  float bias,
+                  bool bias_after_scale) {
+  auto out_meta = UnchangedInferMeta(x.meta());
+  auto dense_out = pten::Empty<T, ContextT>(dev_ctx, std::move(out_meta));
+  ScaleKernel<T, ContextT>(
+      dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
+  return dense_out;
+}
 } // namespace pten
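The DenseTensor-returning Scale wrapper that previously lived in paddle/pten/include/math.h now sits next to the kernel declaration above: it infers the output meta with UnchangedInferMeta, allocates via pten::Empty, and forwards to ScaleKernel. A hedged usage sketch (hypothetical caller; the pten::CPUContext and input tensor are assumed to be set up elsewhere):

// Illustration only; not part of this commit.
#include "paddle/pten/backends/cpu/cpu_context.h"  // assumed location of pten::CPUContext
#include "paddle/pten/kernels/scale_kernel.h"

// Returns 0.5 * x + 1 as a freshly allocated DenseTensor.
pten::DenseTensor HalveThenShift(const pten::CPUContext& dev_ctx,
                                 const pten::DenseTensor& x) {
  return pten::Scale<float>(dev_ctx, x, /*scale=*/pten::Scalar(0.5f),
                            /*bias=*/1.0f, /*bias_after_scale=*/true);
}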
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include "paddle/pten/core/dense_tensor.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/empty_kernel.h"
 namespace pten {
......
@@ -23,8 +23,7 @@
 #include "paddle/pten/common/scalar.h"
 #include "paddle/pten/common/scalar_array.h"
 #include "paddle/pten/core/kernel_registry.h"
-#include "paddle/pten/include/core.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/scale_kernel.h"
 namespace paddle {
@@ -92,42 +91,42 @@ static void ScaleCPU(DataType kernel_dtype,
                      pten::DenseTensor* dense_out) {
   switch (kernel_dtype) {
     case pten::DataType::FLOAT64: {
-      pten::Scale<double>(
+      pten::ScaleKernel<double>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::FLOAT32: {
-      pten::Scale<float>(
+      pten::ScaleKernel<float>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::BFLOAT16: {
-      pten::Scale<paddle::platform::bfloat16>(
+      pten::ScaleKernel<paddle::platform::bfloat16>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT64: {
-      pten::Scale<int64_t>(
+      pten::ScaleKernel<int64_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT32: {
-      pten::Scale<int32_t>(
+      pten::ScaleKernel<int32_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT16: {
-      pten::Scale<int16_t>(
+      pten::ScaleKernel<int16_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT8: {
-      pten::Scale<int8_t>(
+      pten::ScaleKernel<int8_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::UINT8: {
-      pten::Scale<uint8_t>(
+      pten::ScaleKernel<uint8_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
@@ -151,42 +150,42 @@ static void ScaleGPU(DataType kernel_dtype,
                      pten::DenseTensor* dense_out) {
   switch (kernel_dtype) {
     case pten::DataType::FLOAT64: {
-      pten::Scale<double>(
+      pten::ScaleKernel<double>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::FLOAT32: {
-      pten::Scale<float>(
+      pten::ScaleKernel<float>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::FLOAT16: {
-      pten::Scale<paddle::platform::float16>(
+      pten::ScaleKernel<paddle::platform::float16>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT64: {
-      pten::Scale<int64_t>(
+      pten::ScaleKernel<int64_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT32: {
-      pten::Scale<int32_t>(
+      pten::ScaleKernel<int32_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT16: {
-      pten::Scale<int16_t>(
+      pten::ScaleKernel<int16_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::INT8: {
-      pten::Scale<int8_t>(
+      pten::ScaleKernel<int8_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
     case pten::DataType::UINT8: {
-      pten::Scale<uint8_t>(
+      pten::ScaleKernel<uint8_t>(
          dev_ctx, x, pten::Scalar(scale), bias, bias_after_scale, dense_out);
       break;
     }
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include <memory>
-#include "paddle/pten/include/math.h"
+#include "paddle/pten/kernels/scale_kernel.h"
 #include "paddle/pten/api/lib/utils/allocator.h"
 #include "paddle/pten/core/dense_tensor.h"
......
@@ -345,8 +345,10 @@ def source_include(header_file_path):
 #include "paddle/pten/api/lib/kernel_dispatch.h"
 #include "paddle/pten/api/lib/utils/storage.h"
 #include "paddle/pten/core/kernel_registry.h"
-#include "paddle/pten/include/core.h"
-#include "paddle/pten/include/infermeta.h"
+#include "paddle/pten/infermeta/binary.h"
+#include "paddle/pten/infermeta/multiary.h"
+#include "paddle/pten/infermeta/nullary.h"
+#include "paddle/pten/infermeta/unary.h"
 #include "paddle/pten/kernels/declarations.h"
 """
......