未验证 提交 e16cb8b4 编写于 作者: R rogday 提交者: GitHub

Merge pull request #21703 from rogday:transpose

Add n-dimensional transpose to core

* add n-dimensional transpose to core

* add performance test, write sequentially and address review comments
上级 5bf3c1df
......@@ -1739,6 +1739,16 @@ should be done separately if needed.
*/
CV_EXPORTS_W void transpose(InputArray src, OutputArray dst);
/** @brief Transpose for n-dimensional matrices.
*
* @note Input should be a continuous single-channel matrix.
* @param src input array.
* @param order a permutation of [0,1,..,N-1] where N is the number of axes of src.
* The i-th axis of dst will correspond to the axis numbered order[i] of the input.
* @param dst output array of the same type as src.
*/
CV_EXPORTS_W void transposeND(InputArray src, const std::vector<int>& order, OutputArray dst);
/** @brief Performs the matrix transformation of every array element.
The function cv::transform performs the matrix transformation of every
......
#include "perf_precomp.hpp"
#include <numeric>
namespace opencv_test
{
......@@ -393,6 +394,29 @@ PERF_TEST_P_(BinaryOpTest, reciprocal)
SANITY_CHECK_NOTHING();
}
// Benchmarks transposeND with a fully reversed axis order on a 2-D matrix.
PERF_TEST_P_(BinaryOpTest, transposeND)
{
    const Size sz = get<0>(GetParam());
    const int type = get<1>(GetParam());

    // transposeND requires a single-channel input, hence the reshape(1).
    cv::Mat src = Mat(sz, type).reshape(1);

    // Full transpose: axis order [N-1, ..., 1, 0].
    std::vector<int> axes(src.dims);
    std::iota(axes.begin(), axes.end(), 0);
    std::reverse(axes.begin(), axes.end());

    // Destination shape is the source shape reversed.
    std::vector<int> dstShape(src.size.p, src.size.p + src.dims);
    std::reverse(dstShape.begin(), dstShape.end());
    cv::Mat dst = Mat(dstShape, type);

    declare.in(src, WARMUP_RNG).out(dst);

    TEST_CYCLE() cv::transposeND(src, axes, dst);

    SANITY_CHECK_NOTHING();
}
INSTANTIATE_TEST_CASE_P(/*nothing*/ , BinaryOpTest,
testing::Combine(
testing::Values(szVGA, sz720p, sz1080p),
......
......@@ -4,6 +4,7 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "opencv2/core/detail/dispatch_helper.impl.hpp"
namespace cv {
......@@ -282,6 +283,72 @@ void transpose( InputArray _src, OutputArray _dst )
}
// Transposes an n-dimensional matrix: output axis i receives the data of
// input axis order[i]. Requires a continuous, single-channel input; the
// output is allocated here, and in-place operation is not supported.
void transposeND(InputArray src_, const std::vector<int>& order, OutputArray dst_)
{
Mat inp = src_.getMat();
// The flat-offset arithmetic below assumes a dense, gapless layout.
CV_Assert(inp.isContinuous());
CV_CheckEQ(inp.channels(), 1, "Input array should be single-channel");
CV_CheckEQ(order.size(), static_cast<size_t>(inp.dims), "Number of dimensions shouldn't change");
// 'order' must be a permutation of [0..dims-1]: sorting a copy of a valid
// permutation yields exactly 0,1,...,dims-1.
auto order_ = order;
std::sort(order_.begin(), order_.end());
for (size_t i = 0; i < order_.size(); ++i)
{
CV_CheckEQ(static_cast<size_t>(order_[i]), i, "New order should be a valid permutation of the old one");
}
// Output shape is the input shape permuted by 'order'.
std::vector<int> newShape(order.size());
for (size_t i = 0; i < order.size(); ++i)
{
newShape[i] = inp.size[order[i]];
}
dst_.create(static_cast<int>(newShape.size()), newShape.data(), inp.type());
Mat out = dst_.getMat();
CV_Assert(out.isContinuous());
CV_Assert(inp.data != out.data); // in-place transpose is not supported
// Trailing axes left in place by the permutation (order[i] == i for all
// i >= continuous_idx) keep their input memory layout, so each such tail
// slice can be copied with a single memcpy.
int continuous_idx = 0;
for (int i = static_cast<int>(order.size()) - 1; i >= 0; --i)
{
if (order[i] != i)
{
continuous_idx = i + 1;
break;
}
}
// Elements per memcpy run: the whole array for an identity permutation,
// otherwise the extent spanned by the unchanged tail axes of the output.
size_t continuous_size = continuous_idx == 0 ? out.total() : out.step1(continuous_idx - 1);
size_t outer_size = out.total() / continuous_size;
// steps[i]: input stride, in elements, of the input axis feeding output axis i.
std::vector<size_t> steps(order.size());
for (int i = 0; i < static_cast<int>(steps.size()); ++i)
{
steps[i] = inp.step1(order[i]);
}
auto* src = inp.ptr<const unsigned char>();
auto* dst = out.ptr<unsigned char>();
size_t src_offset = 0;
size_t es = out.elemSize();
// Walk the output sequentially: copy one (generally strided) input slice per
// iteration, then advance a mixed-radix counter over the outer output axes,
// keeping src_offset equal to the input element offset of the counter value.
for (size_t i = 0; i < outer_size; ++i)
{
std::memcpy(dst, src + es * src_offset, es * continuous_size);
dst += es * continuous_size;
// Odometer increment with carry, innermost outer axis first:
// (src_offset / steps[j]) % out.size[j] is the current index along output
// axis j; a nonzero value means no wrap-around, so carrying stops here.
for (int j = continuous_idx - 1; j >= 0; --j)
{
src_offset += steps[j];
if ((src_offset / steps[j]) % out.size[j] != 0)
{
break;
}
// Axis j wrapped back to 0: remove its full extent and carry to axis j-1.
src_offset -= steps[j] * out.size[j];
}
}
}
#if CV_SIMD128
template<typename V> CV_ALWAYS_INLINE void flipHoriz_single( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
{
......
......@@ -3,6 +3,7 @@
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "ref_reduce_arg.impl.hpp"
#include <algorithm>
namespace opencv_test { namespace {
......@@ -2128,6 +2129,79 @@ TEST(Core_minMaxIdx, regression_9207_1)
}
// Parameterized fixture for transposeND accuracy tests: each parameter is a
// (shape, matrix type) pair, unpacked into members before every test.
class TransposeND : public testing::TestWithParam< tuple<std::vector<int>, perf::MatType> >
{
public:
    std::vector<int> m_shape;  // dimensions of the test matrix
    int m_type;                // OpenCV element type (e.g. CV_8UC1)

    void SetUp()
    {
        m_shape = get<0>(GetParam());
        m_type = get<1>(GetParam());
    }
};
// Exhaustive check of transposeND against a brute-force per-element mapping:
// for every permutation of the axes, every output element must equal the
// input element at the correspondingly permuted index.
TEST_P(TransposeND, basic)
{
Mat inp(m_shape, m_type);
randu(inp, 0, 255);
// Start from the identity permutation; std::next_permutation below
// enumerates all remaining axis orders.
std::vector<int> order(m_shape.size());
std::iota(order.begin(), order.end(), 0);
// Maps an input index to the matching output index: output axis i takes
// its coordinate from input axis order[i].
auto transposer = [&order] (const std::vector<int>& id)
{
std::vector<int> ret(id.size());
for (size_t i = 0; i < id.size(); ++i)
{
ret[i] = id[order[i]];
}
return ret;
};
// Advances a multi-dimensional index through 'inp' in row-major order
// (odometer-style increment with carry from the last axis).
auto advancer = [&inp] (std::vector<int>& id)
{
for (int j = static_cast<int>(id.size() - 1); j >= 0; --j)
{
++id[j];
if (id[j] != inp.size[j])
{
break;
}
id[j] = 0;
}
};
do
{
Mat out;
cv::transposeND(inp, order, out);
// Visit every input element once and compare it to its transposed position.
std::vector<int> id(order.size());
for (size_t i = 0; i < inp.total(); ++i)
{
auto new_id = transposer(id);
switch (inp.type())
{
case CV_8UC1:
ASSERT_EQ(inp.at<uint8_t>(id.data()), out.at<uint8_t>(new_id.data()));
break;
case CV_32FC1:
ASSERT_EQ(inp.at<float>(id.data()), out.at<float>(new_id.data()));
break;
default:
FAIL() << "Unsupported type: " << inp.type();
}
advancer(id);
}
} while (std::next_permutation(order.begin(), order.end()));
}
// Shapes cover a 3-D and a 2-D case; element types cover the two branches
// handled in the test body (CV_8UC1 and CV_32FC1).
INSTANTIATE_TEST_CASE_P(Arithm, TransposeND, testing::Combine(
testing::Values(std::vector<int>{2, 3, 4}, std::vector<int>{5, 10}),
testing::Values(perf::MatType(CV_8UC1), CV_32FC1)
));
TEST(Core_minMaxIdx, regression_9207_2)
{
const int rows = 13;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册