Unverified commit c23afce1, authored by Chen Weihang, committed by GitHub

move tensor using to single header (#38142)

Remove the `using Tensor = paddle::experimental::Tensor;` alias from tensor.h, move it into the new external-only header tensor_compat.h, and update internal code to use the fully qualified `paddle::experimental::Tensor` name.

Parent 75332401
@@ -110,8 +110,8 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
                           const std::vector<std::string>& outputs,
                           const std::vector<std::string>& attrs) {
   VLOG(1) << "Custom Operator: Start run KernelFunc.";
-  std::vector<paddle::Tensor> custom_ins;
-  std::vector<std::vector<paddle::Tensor>> custom_vec_ins;
+  std::vector<paddle::experimental::Tensor> custom_ins;
+  std::vector<std::vector<paddle::experimental::Tensor>> custom_vec_ins;
   for (auto& in_name : inputs) {
     VLOG(1) << "Custom Operator: input name - " << in_name;
     if (detail::IsDuplicableVar(in_name)) {
@@ -120,7 +120,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
       PADDLE_ENFORCE_NE(vec_x.empty(), true,
                         platform::errors::NotFound(
                             "Input vector<tensor> (%s) is empty.", in_name));
-      std::vector<paddle::Tensor> custom_vec_in;
+      std::vector<paddle::experimental::Tensor> custom_vec_in;
       for (size_t i = 0; i < vec_x.size(); ++i) {
         auto* x = vec_x[i];
         PADDLE_ENFORCE_NOT_NULL(
@@ -132,7 +132,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
                 "The %d-th tensor in input vector<tensor> (%s) "
                 "is not initialized.",
                 i, in_name));
-        paddle::Tensor custom_t;
+        paddle::experimental::Tensor custom_t;
         custom_t.set_impl(std::move(experimental::MakePtenDenseTensor(*x)));
         custom_vec_in.emplace_back(custom_t);
       }
@@ -144,7 +144,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
       PADDLE_ENFORCE_EQ(x->IsInitialized(), true,
                         platform::errors::InvalidArgument(
                             "Input tensor (%s) is not initialized.", in_name));
-      paddle::Tensor custom_in;
+      paddle::experimental::Tensor custom_in;
       custom_in.set_impl(std::move(experimental::MakePtenDenseTensor(*x)));
       custom_ins.emplace_back(custom_in);
     }
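All four hunks above apply the same conversion pattern: wrap the framework tensor in a pten DenseTensor and install it as the experimental Tensor's implementation via set_impl. Isolated as a sketch (the helper name WrapForCustomOp is illustrative, and the framework::Tensor parameter type is an assumption based on the surrounding context; error checks omitted):

// Hypothetical helper mirroring the pattern in RunKernelFunc above.
paddle::experimental::Tensor WrapForCustomOp(const framework::Tensor& x) {
  paddle::experimental::Tensor custom_t;
  // MakePtenDenseTensor converts the framework tensor into a pten
  // DenseTensor; set_impl installs it as this Tensor's implementation.
  custom_t.set_impl(std::move(experimental::MakePtenDenseTensor(x)));
  return custom_t;
}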
...
@@ -42,3 +42,4 @@ limitations under the License. */
 #include "paddle/pten/api/ext/exception.h"
 #include "paddle/pten/api/ext/op_meta_info.h"
 #include "paddle/pten/api/ext/place.h"
+#include "paddle/pten/api/ext/tensor_compat.h"
@@ -36,7 +36,7 @@ namespace framework {
 class PADDLE_API OpMetaInfoHelper;
 }  // namespace framework
 
-using Tensor = paddle::Tensor;
+using Tensor = paddle::experimental::Tensor;
 
 ///////////////// Util Marco Define ////////////////
...
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/api/include/tensor.h"
// Note(chenweihang): In order to be compatible with the original custom
// operator Tensor interface, this file is only available to external
// users; it cannot be included inside paddle
namespace paddle {
using Tensor = paddle::experimental::Tensor;
} // namespace paddle
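Because the alias now lives in tensor_compat.h, external custom-operator code is unaffected: it can keep writing paddle::Tensor, which resolves to paddle::experimental::Tensor. A minimal sketch of such external code, assuming the usual custom-op entry header paddle/extension.h and a hypothetical custom_identity operator (illustrative only, not part of this commit):

#include <vector>
#include "paddle/extension.h"

// External kernel: paddle::Tensor resolves to paddle::experimental::Tensor
// through the compat alias. Copies the float input into a new CPU tensor.
std::vector<paddle::Tensor> IdentityForward(const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
  auto* out_data = out.mutable_data<float>(paddle::PlaceType::kCPU);
  auto* x_data = x.data<float>();
  for (int64_t i = 0; i < x.size(); ++i) {
    out_data[i] = x_data[i];
  }
  return {out};
}

PD_BUILD_OP(custom_identity)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(IdentityForward));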
@@ -489,8 +489,3 @@ class PADDLE_API Tensor final {
 
 }  // namespace experimental
 }  // namespace paddle
-
-namespace paddle {
-// In order to be compatible with the original custom operator Tensor interface
-using Tensor = paddle::experimental::Tensor;
-}  // namespace paddle
@@ -21,9 +21,9 @@ namespace paddle {
 namespace tests {
 
 template <typename T>
-paddle::Tensor InitCPUTensorForTest() {
+experimental::Tensor InitCPUTensorForTest() {
   std::vector<int64_t> tensor_shape{5, 5};
-  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
   auto* p_data_ptr = t1.mutable_data<T>(paddle::PlaceType::kCPU);
   for (int64_t i = 0; i < t1.size(); i++) {
     p_data_ptr[i] = T(5);
@@ -57,18 +57,18 @@ void TestCopyTensor() {
 
 void TestAPIPlace() {
   std::vector<int64_t> tensor_shape = {5, 5};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto t1 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape);
   t1.mutable_data<float>();
   CHECK((paddle::PlaceType::kGPU == t1.place()));
 #endif
-  auto t2 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
+  auto t2 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
   t2.mutable_data<float>();
   CHECK((paddle::PlaceType::kCPU == t2.place()));
 }
 
 void TestAPISizeAndShape() {
   std::vector<int64_t> tensor_shape = {5, 5};
-  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
   CHECK_EQ(t1.size(), 25);
   CHECK(t1.shape() == tensor_shape);
 }
@@ -79,19 +79,19 @@ void TestAPISlice() {
   std::vector<int64_t> tensor_shape_origin2 = {5, 5, 5};
   std::vector<int64_t> tensor_shape_sub2 = {1, 5, 5};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto t1 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin1);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin1);
   t1.mutable_data<float>();
   CHECK(t1.slice(0, 5).shape() == tensor_shape_origin1);
   CHECK(t1.slice(0, 3).shape() == tensor_shape_sub1);
-  auto t2 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin2);
+  auto t2 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin2);
   t2.mutable_data<float>();
   CHECK(t2.slice(4, 5).shape() == tensor_shape_sub2);
 #endif
-  auto t3 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin1);
+  auto t3 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin1);
   t3.mutable_data<float>();
   CHECK(t3.slice(0, 5).shape() == tensor_shape_origin1);
   CHECK(t3.slice(0, 3).shape() == tensor_shape_sub1);
-  auto t4 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin2);
+  auto t4 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin2);
   t4.mutable_data<float>();
   CHECK(t4.slice(4, 5).shape() == tensor_shape_sub2);
@@ -111,7 +111,7 @@ void TestAPISlice() {
 template <typename T>
 paddle::DataType TestDtype() {
   std::vector<int64_t> tensor_shape = {5, 5};
-  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
   t1.template mutable_data<T>();
   return t1.type();
 }
@@ -119,12 +119,12 @@ paddle::DataType TestDtype() {
 template <typename T>
 void TestCast(paddle::DataType data_type) {
   std::vector<int64_t> tensor_shape = {5, 5};
-  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
+  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
   t1.template mutable_data<T>();
   auto t2 = t1.cast(data_type);
   CHECK(t2.type() == data_type);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto tg1 = paddle::Tensor(paddle::PlaceType::kGPU);
+  auto tg1 = experimental::Tensor(paddle::PlaceType::kGPU);
   tg1.reshape(tensor_shape);
   tg1.template mutable_data<T>();
   auto tg2 = tg1.cast(data_type);
@@ -192,7 +192,7 @@ void GroupTestDtype() {
 }
 
 void TestInitilized() {
-  paddle::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
+  experimental::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
   CHECK(test_tensor.is_initialized() == false);
   test_tensor.mutable_data<float>();
   CHECK(test_tensor.is_initialized() == true);
...
@@ -26,7 +26,7 @@ namespace tests {
 namespace framework = paddle::framework;
 using DDim = paddle::framework::DDim;
 
-void CheckScaleResult(Tensor* out) {
+void CheckScaleResult(experimental::Tensor* out) {
   ASSERT_EQ(out->dims().size(), 2);
   ASSERT_EQ(out->dims()[0], 3);
   ASSERT_EQ(out->dims()[1], 4);
...