提交 95f89e71 编写于 作者: M mindspore-ci-bot 提交者: Gitee

!4925 [MS][LITE][GPU]add reshape test case

Merge pull request !4925 from chenzupeng/master-lite
......@@ -49,7 +49,7 @@ int DepthwiseConv2dOpenCLKernel::Init() {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
}
if (mem_type_ == MEM_TYPE::BUF) {
if (out_mem_type_ == OpenCLMemType::BUF) {
kernel_name += "_BUF";
} else {
kernel_name += "_IMG";
......@@ -73,7 +73,7 @@ int DepthwiseConv2dOpenCLKernel::Init() {
ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options);
#endif
this->InitBuffer();
MS_LOG(DEBUG) << kernel_name << " Init Done! mem type=" << static_cast<int>(mem_type_);
MS_LOG(DEBUG) << kernel_name << " Init Done! mem type=" << static_cast<int>(out_mem_type_);
return RET_OK;
}
......
......@@ -49,7 +49,6 @@ class DepthwiseConv2dOpenCLKernel : public OpenCLKernel {
FLOAT_t *packed_weight_;
FLOAT_t *bias_data_;
cl::Kernel kernel_;
enum class MEM_TYPE { BUF, IMG } mem_type_{MEM_TYPE::IMG};
};
} // namespace mindspore::kernel
......
......@@ -64,7 +64,7 @@ int PoolingOpenCLKernel::Init() {
#ifdef PROGRAM_WITH_IL
ocl_runtime->CreateKernelFromIL(kernel_(), kernel_name);
#else
if (mem_type_ == MEM_TYPE::BUF) {
if (out_mem_type_ == OpenCLMemType::BUF) {
kernel_name += "_BUF";
} else {
kernel_name += "_IMG";
......
......@@ -42,7 +42,6 @@ class PoolingOpenCLKernel : public OpenCLKernel {
private:
std::vector<size_t> InitGlobalSize() const;
enum class MEM_TYPE { BUF, IMG } mem_type_{MEM_TYPE::IMG};
PoolingParameter *parameter_;
cl::Kernel kernel_;
};
......
......@@ -33,7 +33,17 @@ namespace mindspore::kernel {
int ReshapeOpenCLKernel::Init() {
std::string kernel_name = "reshape";
auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
if (in_ori_format_ != schema::Format_NHWC4 && in_ori_format_ != schema::Format_NHWC) {
MS_LOG(ERROR) << "Reshape input format:" << in_ori_format_ << " not support yet.";
return RET_ERROR;
}
if (in_tensors_[0]->shape().back() != out_tensors_[0]->shape().back()) {
MS_LOG(ERROR) << "Reshape input channel " << in_tensors_[0]->shape().back() << " should equal output channel"
<< out_tensors_[0]->shape().back();
return RET_ERROR;
}
#ifdef PROGRAM_WITH_IL
ocl_runtime->CreateKernelFromIL(kernel_(), kernel_name);
#else
......@@ -43,9 +53,7 @@ int ReshapeOpenCLKernel::Init() {
ocl_runtime->LoadSource(program_name, source);
ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options);
#endif
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(schema::Format_NHWC4);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(schema::Format_NHWC4);
if (out_tensors_[0]->shape().size() == 2) {
out_ori_format_ = schema::Format_NC;
......
......@@ -56,7 +56,9 @@ int SliceOpenCLKernel::Init() {
auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
ocl_runtime->LoadSource(program_name, source);
ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options);
ori_format_ = out_tensors_[0]->GetFormat();
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(schema::Format_NHWC4);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(schema::Format_NHWC4);
return RET_OK;
......
......@@ -337,6 +337,7 @@ if (SUPPORT_GPU)
${TEST_DIR}/ut/src/runtime/kernel/opencl/to_format_tests.cc
${TEST_DIR}/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc
${TEST_DIR}/ut/src/runtime/kernel/opencl/prelu_tests.cc
${TEST_DIR}/ut/src/runtime/kernel/opencl/reshape_tests.cc
)
endif()
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "mindspore/core/utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/common/file_utils.h"
#include "mindspore/lite/src/runtime/opencl/opencl_runtime.h"
#include "mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h"
#include "mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h"
namespace mindspore {
// Test fixture for the OpenCL Reshape kernel unit tests.
// Inherits the shared test scaffolding from mindspore::CommonTest.
class TestReshapeOpenCL : public mindspore::CommonTest {
 public:
  // Defaulted instead of a user-provided empty body: same behavior,
  // idiomatic modern C++ (clang-tidy modernize-use-equals-default).
  TestReshapeOpenCL() = default;
};
// End-to-end check of the OpenCL Reshape kernel on FP32 data:
// loads a [1,1,1,63] NHWC input from disk, reshapes it to [1,63] (NC)
// through a SubGraphOpenCLKernel, and compares the device output
// against a golden file with 1e-5 tolerance.
TEST_F(TestReshapeOpenCL, ReshapeFp32) {
// Acquire the process-wide OpenCL runtime and its device allocator.
auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
ocl_runtime->Init();
auto allocator = ocl_runtime->GetAllocator();
// Channel count. 63 is not a multiple of 4, which exercises NHWC4
// channel padding — presumably intentional; confirm with kernel author.
int c = 63;
size_t input_size;
std::string input_path = "./test_data/reshape/reshape_fp32_input.bin";
// NOTE(review): ReadFile appears to heap-allocate; this buffer (and
// correct_data below) is never freed on any path — TODO confirm ownership.
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
if (input_data == nullptr) {
MS_LOG(ERROR) << "input_data load error.";
return;
}
// Input tensor: NHWC, shape [1,1,1,c]. unique_ptr owns it; raw pointer
// below is a non-owning alias passed to the kernel/graph.
std::vector<int> input_shape = {1, 1, 1, c};
auto tensor_x_ptr =
std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), input_shape, schema::Format_NHWC);
auto tensor_x = tensor_x_ptr.get();
if (tensor_x == nullptr) {
MS_LOG(ERROR) << "tensor_x create error.";
return;
}
// Output tensor: NC, shape [1,c] — same channel count, rank reduced.
std::vector<int> out_shape = {1, c};
auto tensor_out_ptr =
std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), out_shape, schema::Format_NC);
auto tensor_out = tensor_out_ptr.get();
if (tensor_out == nullptr) {
MS_LOG(ERROR) << "tensor_out create error.";
return;
}
std::vector<lite::tensor::Tensor *> inputs{tensor_x};
std::vector<lite::tensor::Tensor *> outputs{tensor_out};
// Build the reshape kernel (no OpParameter needed, hence nullptr).
auto arith_kernel_ptr = std::make_unique<kernel::ReshapeOpenCLKernel>(nullptr, inputs, outputs);
auto arith_kernel = arith_kernel_ptr.get();
if (arith_kernel == nullptr) {
MS_LOG(ERROR) << "arith_kernel create error.";
return;
}
arith_kernel->Init();
// Device buffer is allocated after kernel Init so the kernel's chosen
// memory type/format is respected — keep this ordering.
inputs[0]->MallocData(allocator);
std::vector<kernel::LiteKernel *> kernels{arith_kernel};
// Single-kernel subgraph: the kernel is entry, exit, and body at once.
auto pGraph_ptr = std::make_unique<kernel::SubGraphOpenCLKernel>(inputs, outputs, kernels, kernels, kernels);
auto pGraph = pGraph_ptr.get();
if (pGraph == nullptr) {
MS_LOG(ERROR) << "pGraph create error.";
return;
}
pGraph->Init();
// Copy host input into the tensor's (allocator-backed) buffer, then run.
memcpy(inputs[0]->Data(), input_data, input_size);
pGraph->Run();
// Golden output produced offline for the same input file.
size_t output_size;
std::string output_path = "./test_data/reshape/reshape_fp32_output.bin";
auto correct_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
if (correct_data == nullptr) {
MS_LOG(ERROR) << "correct_data create error.";
return;
}
// Debug dump of at most the first 100 output values.
printf("==================output data=================\n");
float *output_data = reinterpret_cast<float *>(tensor_out->Data());
std::cout << std::endl;
int size_n = c;
size_n = size_n > 100 ? 100 : size_n;
for (int i = 0; i < size_n; i++) {
std::cout << output_data[i] << " ";
if ((i + 1) % c == 0) {
std::cout << std::endl;
}
}
std::cout << std::endl;
// compare
CompareOutputData(output_data, correct_data, c, 0.00001);
// Detach data pointers before the unique_ptrs destroy the tensors —
// presumably so the tensor destructors don't free allocator-owned
// memory; TODO confirm against lite::tensor::Tensor ownership rules.
inputs[0]->SetData(nullptr);
outputs[0]->SetData(nullptr);
lite::opencl::OpenCLRuntime::DeleteInstance();
MS_LOG(INFO) << "Test ReshapeFp32 passed";
}
} // namespace mindspore
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册