Unverified commit cb6caf56, authored by 石晓伟, committed by GitHub

infrt kernel test, test=develop (#39361)

Parent 3cca89e7
#TO DO:remove fluid
include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
option(INFRT_WITH_PTEN "Compile PaddlePaddle InfRT with Paddle Tensor Library." ON)
if (NOT WITH_INFRT)
return()
@@ -19,6 +18,15 @@ include(infrt_lib)
set(infrt_src CACHE INTERNAL "" FORCE)
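# Make the INFRT_WITH_PTEN switch visible to C++ sources as a preprocessor definition.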
if (INFRT_WITH_PTEN)
add_definitions("-DINFRT_WITH_PTEN")
endif()
if (INFRT_WITH_PTEN)
#TO DO:remove fluid
include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
endif()
# Gather headers for library publish.
function(core_gather_headers)
file(GLOB includes LIST_DIRECTORIES false RELATIVE ${CMAKE_SOURCE_DIR} *.h)
@@ -68,6 +76,7 @@ endif()
add_subdirectory(api)
add_subdirectory(backends)
add_subdirectory(common)
add_subdirectory(dialect)
add_subdirectory(host_context)
@@ -93,8 +102,14 @@ set(infrt_mlir_incs
)
message(STATUS "infrt srcs:\n${infrt_src}")
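# When pTen support is enabled, the infrt libraries additionally link against the pten and dense_tensor targets.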
if (INFRT_WITH_PTEN)
cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
add_dependencies(infrt ${infrt_mlir_incs})
else()
cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
add_dependencies(infrt ${infrt_mlir_incs})
endif()
add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS})
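# Targets for the new host backend wrappers around the pTen CPU context and allocator.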
cc_library(pten_cpu_context SRCS pten_context.cc DEPS pten)
cc_library(pten_cpu_allocator SRCS pten_allocator.cc DEPS pten)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/infrt/backends/host/pten_allocator.h"
namespace infrt {
namespace backends {} // namespace backends
} // namespace infrt
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/core/allocator.h"
namespace infrt {
namespace backends {
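// Minimal host (CPU) allocator for InfRT implementing the pten::Allocator interface on top of ::operator new.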
class HostPtenAllocator : public pten::Allocator {
public:
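// Deleter used by the returned AllocationPtr; it releases the pten::Allocation wrapper object.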
static void deleter(pten::Allocation* ptr) { ::operator delete(ptr); }
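// Allocates bytes_size bytes of host memory and wraps it in a CPU-placed pten::Allocation.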
AllocationPtr Allocate(size_t bytes_size) {
return AllocationPtr(
new pten::Allocation(::operator new(bytes_size),
bytes_size,
pten::Place(pten::AllocationType::CPU)),
deleter);
}
};
} // namespace backends
} // namespace infrt
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/infrt/backends/host/pten_context.h"
namespace infrt {
namespace backends {} // namespace backends
} // namespace infrt
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/pten/backends/cpu/cpu_context.h"
namespace infrt {
namespace backends {
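// Thin host-side wrapper over pten::CPUContext; `Base` exposes the underlying context type so pTen kernels
// templated on the device context can be instantiated with it, and the SetEigenDevice overloads are brought
// into this class's scope with a using-declaration.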
class HostPtenContext : public pten::CPUContext {
public:
using Base = pten::CPUContext;
using pten::CPUContext::SetEigenDevice;
};
} // namespace backends
} // namespace infrt
@@ -28,6 +28,10 @@
#include "paddle/infrt/tensor/dense_tensor_view.h"
#include "paddle/infrt/tensor/tensor_map.h"
#include "paddle/infrt/tensor/tensor_shape.h"
#ifdef INFRT_WITH_PTEN
#include "paddle/pten/backends/cpu/cpu_context.h"
#include "paddle/pten/core/dense_tensor.h"
#endif // INFRT_WITH_PTEN
namespace infrt {
namespace host_context {
@@ -45,8 +49,10 @@ using ValueVariantType = Variant<int16_t,
tensor::DenseHostTensor,
MlirFunctionExecutable*,
tensor::TensorMap,
// pten::CPUContext,
// pten::DenseTensor,
#ifdef INFRT_WITH_PTEN
pten::CPUContext,
pten::DenseTensor,
#endif
std::vector<int16_t>,
std::vector<int32_t>,
std::vector<int64_t>,
......
core_gather_headers()
if (INFRT_WITH_PTEN)
set(pten_kernel_src pten_kernels.cc)
endif()
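# pten_kernels.cc is compiled only when pTen support is enabled.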
gather_srcs(infrt_src SRCS
basic_kernels.cc
# pten_kernels.cc
${pten_kernel_src}
test_kernels.cc
tensor_shape_kernels.cc
tensor_kernels.cc
control_flow_kernels.cc
)
if(INFRT_WITH_PTEN)
add_subdirectory(pten)
endif()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
add_subdirectory(tests)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cc_test(reshape_kernel_test SRCS reshape_kernel_test.cc DEPS pten reshape_kernel)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/infrt/backends/host/pten_allocator.h"
#include "paddle/infrt/backends/host/pten_context.h"
#include "paddle/pten/kernels/reshape_kernel.h"
namespace infrt {
namespace kernels {
namespace tests {
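// Builds a 3x2x2x3 float32 host tensor, reshapes it to {12, 3} with the pTen reshape kernel, and
// checks the resulting shape, dtype, layout and element values.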
TEST(pten, reshape) {
auto allocator = backends::HostPtenAllocator();
auto context = backends::HostPtenContext();
context.SetDeviceAllocator(&allocator);
context.SetHostAllocator(&allocator);
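// The same host allocator backs both the device (CPU) and host allocations of the context.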
auto tensor_meta =
pten::DenseTensorMeta(pten::DataType::FLOAT32,
pten::framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW);
auto dense_x = pten::DenseTensor(&allocator, std::move(tensor_meta));
auto* dense_x_data = static_cast<float*>(
dense_x.AllocateFrom(&allocator, pten::DataType::FLOAT32));
// TODO: this is cumbersome to write and needs to be simplified.
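// Reshape is instantiated on the base CPUContext type exposed via HostPtenContext::Base.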
auto out = pten::Reshape<float, backends::HostPtenContext::Base>(
context, dense_x, {12, 3});
std::vector<int64_t> expect_shape = {12, 3};
ASSERT_EQ(out.dims()[0], expect_shape[0]);
ASSERT_EQ(out.dims()[1], expect_shape[1]);
ASSERT_EQ(out.numel(), 36);
ASSERT_EQ(out.meta().dtype, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
bool value_equal = true;
auto* dense_out_data = out.data<float>();
for (int i = 0; i < dense_x.numel(); i++) {
if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f)
value_equal = false;
}
ASSERT_EQ(value_equal, true);
}
} // namespace tests
} // namespace kernels
} // namespace infrt
@@ -52,6 +52,7 @@ struct CPUContext::Impl {
CPUContext::CPUContext()
: DeviceContext(), impl_(std::make_unique<CPUContext::Impl>()) {}
CPUContext::CPUContext(CPUContext&&) = default;
CPUContext::CPUContext(const Place& place)
: DeviceContext(), impl_(std::make_unique<CPUContext::Impl>(place)) {}
......
@@ -27,6 +27,7 @@ namespace pten {
class CPUContext : public DeviceContext {
public:
CPUContext();
CPUContext(CPUContext&&);
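// Move-constructible so a CPUContext can be stored by value (e.g. in InfRT's Value variant when built with INFRT_WITH_PTEN).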
explicit CPUContext(const Place&);
virtual ~CPUContext();
Eigen::DefaultDevice* eigen_device() const;
......