Unverified commit 3121f889 authored by Chen Weihang, committed by GitHub

change api->include and hapi->api (#36938)

Parent 10b039b7
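This commit renames two pten directories: the low-level headers previously under paddle/pten/api/include move to paddle/pten/include, and the high-level (hapi) API under paddle/pten/hapi moves to paddle/pten/api. A minimal before/after sketch of the include updates, using paths taken from the hunks below (shown only to illustrate the mapping, not an actual hunk):

// before this commit
#include "paddle/pten/api/include/core.h"             // low-level pten headers
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"  // high-level (hapi) utilities

// after this commit
#include "paddle/pten/include/core.h"                 // api -> include
#include "paddle/pten/api/lib/utils/tensor_utils.h"   // hapi -> api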
@@ -40,7 +40,7 @@ limitations under the License. */
#include "paddle/fluid/platform/variant.h"
#include "paddle/utils/flat_hash_map.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/include/core.h"
namespace paddle {
namespace framework {
@@ -25,8 +25,8 @@ limitations under the License. */
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/utils/flat_hash_map.h"
#include "paddle/utils/small_vector.h"
@@ -27,7 +27,7 @@
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/include/core.h"
DECLARE_bool(use_mkldnn);
@@ -20,9 +20,9 @@
#include "paddle/fluid/platform/for_range.h"
// only can include the headers in paddle/pten/api dirs
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/linalg.h"
namespace paddle {
namespace operators {
@@ -19,8 +19,8 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/creation.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/creation.h"
namespace paddle {
namespace operators {
@@ -26,9 +26,9 @@ limitations under the License. */
#include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h"
// only can include the headers in paddle/pten/api dirs
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/linalg.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
@@ -18,9 +18,9 @@ limitations under the License. */
#include "paddle/fluid/framework/pten_utils.h"
// only can include the headers in paddle/top/api dirs
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/math.h"
namespace paddle {
namespace operators {
@@ -18,9 +18,9 @@ limitations under the License. */
#include "paddle/fluid/framework/pten_utils.h"
// only can include the headers in paddle/top/api dirs
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/math.h"
namespace paddle {
namespace operators {
@@ -20,8 +20,8 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h"
// only can include the headers in paddle/pten/api dirs
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/math.h"
namespace paddle {
namespace operators {
# pten api
# pten (low level) api headers: include
# pten (high level) api
add_subdirectory(api)
# pten high level api
add_subdirectory(hapi)
# pten core components
add_subdirectory(core)
# pten kernels for diff device
@@ -10,3 +9,13 @@ add_subdirectory(kernels)
add_subdirectory(infershape)
# pten tests
add_subdirectory(tests)
# make an unity target for compile deps
set(PTEN_DEPS convert_utils dense_tensor kernel_factory kernel_context)
set(PTEN_DEPS ${PTEN_DEPS} math_cpu linalg_cpu creation_cpu manipulation_cpu)
set(PTEN_DEPS ${PTEN_DEPS} unary binary)
if(WITH_GPU OR WITH_ROCM)
set(PTEN_DEPS ${PTEN_DEPS} math_cuda linalg_cuda creation_cuda manipulation_cuda)
endif()
cc_library(pten SRCS all.cc DEPS ${PTEN_DEPS})
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/all.h"
#include "paddle/pten/all.h"
namespace paddle {
namespace experimental {} // namespace experimental
} // namespace paddle
namespace pten {} // namespace pten
@@ -14,9 +14,10 @@ limitations under the License. */
#pragma once
// user apis
#include "paddle/pten/hapi/include/creation.h"
#include "paddle/pten/hapi/include/linalg.h"
#include "paddle/pten/hapi/include/manipulation.h"
#include "paddle/pten/hapi/include/math.h"
#include "paddle/pten/hapi/include/tensor.h"
// develop apis
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/creation.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/include/linalg.h"
#include "paddle/pten/include/manipulation.h"
#include "paddle/pten/include/math.h"
set(PTEN_DEPS convert_utils dense_tensor kernel_factory kernel_context)
set(PTEN_DEPS ${PTEN_DEPS} math_cpu linalg_cpu creation_cpu manipulation_cpu)
set(PTEN_DEPS ${PTEN_DEPS} unary binary)
if(WITH_GPU OR WITH_ROCM)
set(PTEN_DEPS ${PTEN_DEPS} math_cuda linalg_cuda creation_cuda manipulation_cuda)
endif()
add_subdirectory(lib)
cc_library(pten SRCS all.cc DEPS ${PTEN_DEPS})
cc_library(pten_hapi SRCS all.cc DEPS linalg_api math_api creation_api)
@@ -14,4 +14,6 @@ limitations under the License. */
#include "paddle/pten/api/all.h"
namespace pten {} // namespace pten
namespace paddle {
namespace experimental {} // namespace experimental
} // namespace paddle
@@ -14,10 +14,9 @@ limitations under the License. */
#pragma once
// develop apis
#include "paddle/pten/api/include/core.h"
// user apis
#include "paddle/pten/api/include/creation.h"
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/api/include/manipulation.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/api/include/tensor.h"
@@ -14,26 +14,20 @@
#pragma once
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/kernels/cpu/creation.h"
#include "paddle/pten/kernels/cuda/creation.h"
#include "paddle/pten/api/include/tensor.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/common/scalar.h"
namespace pten {
namespace paddle {
namespace experimental {
// TODO(YuanRisheng) This function name should be same as User API name.
// TODO(zyfncg) Automatic code generation
template <typename T, typename ContextT>
DenseTensor FillAnyLike(const ContextT& dev_ctx,
const DenseTensor& x,
const Scalar& val) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
FillAnyLike<T>(dev_ctx, x, val, &dense_out);
return dense_out;
}
Tensor full_like(const Tensor& x,
const Scalar& value,
DataType dtype = DataType::UNDEFINED);
} // namespace pten
Tensor ones_like(const Tensor& x, DataType dtype = DataType::UNDEFINED);
Tensor zeros_like(const Tensor& x, DataType dtype = DataType::UNDEFINED);
} // namespace experimental
} // namespace paddle
@@ -14,25 +14,17 @@
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/kernels/cpu/linalg.h"
#include "paddle/pten/kernels/cuda/linalg.h"
#include "paddle/pten/api/include/tensor.h"
namespace pten {
namespace paddle {
namespace experimental {
template <typename T, typename ContextT>
DenseTensor Dot(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
auto out_meta = DotInferShape(x.meta(), y.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Dot<T>(dev_ctx, x, y, &dense_out);
return dense_out;
}
Tensor dot(const Tensor& x, const Tensor& y);
} // namespace pten
Tensor matmul(const Tensor& x,
const Tensor& y,
bool transpose_x,
bool transpose_y);
} // namespace experimental
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/kernels/cpu/manipulation.h"
#include "paddle/pten/kernels/cuda/manipulation.h"
namespace pten {
template <typename T, typename ContextT>
DenseTensor Flatten(const ContextT& dev_ctx,
const DenseTensor& x,
int start_axis,
int stop_axis) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Flatten<T>(dev_ctx, x, start_axis, stop_axis, &dense_out);
return dense_out;
}
} // namespace pten
#include "paddle/pten/api/include/tensor.h"
namespace paddle {
namespace experimental {
Tensor flatten(const Tensor& x, int start_axis, int stop_axis);
} // namespace experimental
} // namespace paddle
@@ -14,63 +14,14 @@ limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/kernels/cpu/math.h"
#include "paddle/pten/kernels/cuda/math.h"
#include "paddle/pten/api/include/tensor.h"
namespace pten {
namespace paddle {
namespace experimental {
template <typename T, typename ContextT>
DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Sign<T>(dev_ctx, x, &dense_out);
return dense_out;
}
// TODO(chenweihang): add scale API
// TODO(chenweihang): move mean API into stat.h/cc
Tensor mean(const Tensor& x);
template <typename T, typename ContextT>
DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = ReductionInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Mean<T>(dev_ctx, x, &dense_out);
return dense_out;
}
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
float scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Scale<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
ScaleHost<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}
} // namespace pten
} // namespace experimental
} // namespace paddle
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/include/creation.h"
#include "paddle/pten/api/include/creation.h"
#include <memory>
#include "glog/logging.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/kernel_dispatch.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/lib/kernel_dispatch.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/infershape.h"
namespace paddle {
namespace experimental {
@@ -18,10 +18,10 @@ limitations under the License. */
#include <string>
#include <utility>
#include "paddle/pten/api/include/backend_set.h"
#include "paddle/pten/api/include/tensor.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/common/layout.h"
#include "paddle/pten/hapi/include/backend_set.h"
#include "paddle/pten/hapi/include/tensor.h"
// TODO(chenweihang): split KernelName, Key, Kernel, Factory into diff files
#include "paddle/pten/core/convert_utils.h"
@@ -12,19 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/include/linalg.h"
#include "paddle/pten/api/include/linalg.h"
#include <memory>
#include "glog/logging.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/api/lib/kernel_dispatch.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_context.h"
#include "paddle/pten/hapi/lib/kernel_dispatch.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/infershape.h"
namespace paddle {
namespace experimental {
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/include/manipulation.h"
#include "paddle/pten/api/include/manipulation.h"
#include <memory>
#include "glog/logging.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/hapi/lib/kernel_dispatch.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/lib/kernel_dispatch.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/infershape/unary.h"
namespace paddle {
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/include/math.h"
#include "paddle/pten/api/include/math.h"
#include <memory>
#include "glog/logging.h"
#include "paddle/pten/api/include/core.h"
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/hapi/lib/kernel_dispatch.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/lib/kernel_dispatch.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/infershape/unary.h"
namespace paddle {
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/allocator.h"
namespace paddle {
namespace experimental {
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/lib/utils/storage.h"
#include "paddle/pten/api/lib/utils/storage.h"
namespace paddle {
namespace experimental {
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
namespace paddle {
namespace experimental {
@@ -19,11 +19,11 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_factory.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/hapi/lib/utils/storage.h"
namespace paddle {
namespace experimental {
@@ -16,8 +16,8 @@ limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/hapi/lib/utils/storage.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
namespace paddle {
namespace experimental {
@@ -14,7 +14,7 @@ limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
namespace paddle {
namespace experimental {
add_subdirectory(lib)
cc_library(pten_hapi SRCS all.cc DEPS linalg_api math_api creation_api)
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/hapi/include/tensor.h"
namespace paddle {
namespace experimental {
Tensor flatten(const Tensor& x, int start_axis, int stop_axis);
} // namespace experimental
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/hapi/include/tensor.h"
namespace paddle {
namespace experimental {
// TODO(chenweihang): add scale API
// TODO(chenweihang): move mean API into stat.h/cc
Tensor mean(const Tensor& x);
} // namespace experimental
} // namespace paddle
@@ -14,20 +14,26 @@
#pragma once
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/hapi/include/tensor.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/kernels/cpu/creation.h"
#include "paddle/pten/kernels/cuda/creation.h"
namespace paddle {
namespace experimental {
namespace pten {
Tensor full_like(const Tensor& x,
const Scalar& value,
DataType dtype = DataType::UNDEFINED);
// TODO(YuanRisheng) This function name should be same as User API name.
// TODO(zyfncg) Automatic code generation
template <typename T, typename ContextT>
DenseTensor FillAnyLike(const ContextT& dev_ctx,
const DenseTensor& x,
const Scalar& val) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
FillAnyLike<T>(dev_ctx, x, val, &dense_out);
return dense_out;
}
Tensor ones_like(const Tensor& x, DataType dtype = DataType::UNDEFINED);
Tensor zeros_like(const Tensor& x, DataType dtype = DataType::UNDEFINED);
} // namespace experimental
} // namespace paddle
} // namespace pten
@@ -14,17 +14,25 @@
#pragma once
#include "paddle/pten/hapi/include/tensor.h"
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/kernels/cpu/linalg.h"
#include "paddle/pten/kernels/cuda/linalg.h"
namespace paddle {
namespace experimental {
namespace pten {
Tensor dot(const Tensor& x, const Tensor& y);
template <typename T, typename ContextT>
DenseTensor Dot(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
auto out_meta = DotInferShape(x.meta(), y.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Dot<T>(dev_ctx, x, y, &dense_out);
return dense_out;
}
Tensor matmul(const Tensor& x,
const Tensor& y,
bool transpose_x,
bool transpose_y);
} // namespace experimental
} // namespace paddle
} // namespace pten
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/kernels/cpu/manipulation.h"
#include "paddle/pten/kernels/cuda/manipulation.h"
namespace pten {
template <typename T, typename ContextT>
DenseTensor Flatten(const ContextT& dev_ctx,
const DenseTensor& x,
int start_axis,
int stop_axis) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Flatten<T>(dev_ctx, x, start_axis, stop_axis, &dense_out);
return dense_out;
}
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/include/infershape.h"
#include "paddle/pten/kernels/cpu/math.h"
#include "paddle/pten/kernels/cuda/math.h"
namespace pten {
template <typename T, typename ContextT>
DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Sign<T>(dev_ctx, x, &dense_out);
return dense_out;
}
template <typename T, typename ContextT>
DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = ReductionInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Mean<T>(dev_ctx, x, &dense_out);
return dense_out;
}
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
float scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
Scale<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
pten::DenseTensor dense_out(allocator, out_meta);
ScaleHost<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}
} // namespace pten
@@ -28,9 +28,9 @@ namespace cub = hipcub;
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/tensor_utils.h"
namespace pten {
@@ -18,8 +18,8 @@ limitations under the License. */
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/kernels/cpu/utils.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
PT_DECLARE_MODULE(UtilsCPU);
@@ -15,13 +15,13 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/linalg.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/include/linalg.h"
PT_DECLARE_MODULE(LinalgCPU);
@@ -15,13 +15,13 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/creation.h"
#include "paddle/pten/api/include/creation.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/include/creation.h"
#include "paddle/pten/include/creation.h"
PT_DECLARE_MODULE(CreationCPU);
@@ -15,13 +15,13 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/manipulation.h"
#include "paddle/pten/api/include/manipulation.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/include/manipulation.h"
#include "paddle/pten/include/manipulation.h"
PT_DECLARE_MODULE(ManipulationCPU);
@@ -15,11 +15,11 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/linalg.h"
#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/kernels/cuda/utils.h"
PT_DECLARE_MODULE(LinalgCPU);
@@ -15,13 +15,13 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/math.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/include/math.h"
PT_DECLARE_MODULE(MathCPU);
@@ -15,13 +15,13 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/hapi/include/math.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/hapi/lib/utils/allocator.h"
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/include/math.h"
PT_DECLARE_MODULE(MathCPU);