...
 
Commits (5)

- [1e0d70af](https://gitcode.net/wjd2002/ncnn/-/commit/1e0d70af8ccbc3787d6697981c748fd0e067f662) Add translated document: glsl-extension.zh.md (#4818) — 張小凡, 2023-07-02
- [dee2e0dc](https://gitcode.net/wjd2002/ncnn/-/commit/dee2e0dc0cabbaf514e9aa6deba51752df0490cd) fix ios-simulator-gpu badge (#4836) — 未知时光, 2023-07-03
- [e8d8042b](https://gitcode.net/wjd2002/ncnn/-/commit/e8d8042b9051d02e0734dfe9d011931159fbfcb8) Fix a mistake in docs/faq (#4837) — Kin Yu Shek, 2023-07-04
- [47e0daf4](https://gitcode.net/wjd2002/ncnn/-/commit/47e0daf4a15bc222a0e5fad76dd94efdc0f47934) Translate x86_64 SSE to ppc64le VSX intrinsics (#4807) — JeremyRand, 2023-07-06
  * Add POWER9 VSX toolchains. Translating x86_64 SSE to ppc64le VSX intrinsics yields a quite large speedup on POWER9; see https://www.talospace.com/2019/07/easier-power-vectorizing-for-fun-and.html for background.
  * Add power9le docs
  * power9le clang toolchain: document the Clang 13+ requirement
  Co-authored-by: Jeremy Rand <jeremyrand@danwin1210.de>
- [91090d79](https://gitcode.net/wjd2002/ncnn/-/commit/91090d793b80d4bd8453641607acfa984a9be383) pnnx fix build, prepend batch for broadcast reshape (#4841) — nihui, 2023-07-06
  * fix build, prepend batch for broadcast reshape
  * sanitize filename
  * do not fuse to eltwise if broadcast
@@ -73,3 +73,49 @@ jobs:
          export PATH=$GITHUB_WORKSPACE/qemu-install/bin:$PATH
          cd build
          TESTS_EXECUTABLE_LOADER=qemu-ppc64le TESTS_EXECUTABLE_LOADER_ARGUMENTS="-L;/usr/powerpc64le-linux-gnu" ctest --output-on-failure -j 2

  linux-gcc-power9le-vsx:
    runs-on: ubuntu-20.04
    steps:
    - uses: actions/checkout@v3
    - name: cache-qemu
      id: cache-qemu
      uses: actions/cache@v3
      with:
        path: qemu-install
        key: qemu-ppc64le-install-20220502-2
    - name: install-qemu-build-deps
      if: steps.cache-qemu.outputs.cache-hit != 'true'
      run: |
        sudo apt-get update
        sudo apt-get install autoconf automake autotools-dev ninja-build
    - name: checkout-qemu
      if: steps.cache-qemu.outputs.cache-hit != 'true'
      uses: actions/checkout@v3
      with:
        repository: qemu/qemu
        path: qemu
        ref: f5643914a9e8f79c606a76e6a9d7ea82a3fc3e65
    - name: qemu
      if: steps.cache-qemu.outputs.cache-hit != 'true'
      run: |
        cd qemu
        ./configure --prefix=$GITHUB_WORKSPACE/qemu-install --target-list=ppc64le-linux-user --disable-system
        make -j2
        make install
    - name: powerpc64le-gnu-toolchain
      run: |
        sudo apt-get update
        sudo apt-get install g++-powerpc64le-linux-gnu
    - name: configure
      run: mkdir build && cd build && cmake -DCMAKE_TOOLCHAIN_FILE=../toolchains/power9le-linux-gnu-vsx.toolchain.cmake -DNCNN_BUILD_TOOLS=OFF -DNCNN_BUILD_EXAMPLES=OFF -DNCNN_BUILD_TESTS=ON ..
    - name: build
      run: cmake --build build -j 2
    - name: test
      run: |
        export PATH=$GITHUB_WORKSPACE/qemu-install/bin:$PATH
        cd build
        TESTS_EXECUTABLE_LOADER=qemu-ppc64le TESTS_EXECUTABLE_LOADER_ARGUMENTS="-L;/usr/powerpc64le-linux-gnu" ctest --output-on-failure -j 2
@@ -364,6 +364,20 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(riscv)")
    endif()
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)")
    set(NCNN_TARGET_ARCH powerpc)

    if(NCNN_PPC64LE_VSX)
        set(NCNN_TARGET_ARCH x86)

        set(CMAKE_REQUIRED_FLAGS "-DNO_WARN_X86_INTRINSICS -D__SSE4_1__")
        check_cxx_source_compiles("#include <smmintrin.h>\nint main() { __m128i _v, _a, _b; _v = _mm_packus_epi32(_a, _b); return 0; }" NCNN_COMPILER_SUPPORT_PPC64LE_SSE41)
        unset(CMAKE_REQUIRED_FLAGS)

        if(NCNN_COMPILER_SUPPORT_PPC64LE_SSE41)
            option(NCNN_SSE41 "optimize ppc64le platform with sse4.1 extension" ON)
        else()
            message(WARNING "The compiler does not support sse4.1 extension. NCNN_SSE41 will be OFF.")
        endif()
    endif()
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(xtensa)")
    set(NCNN_TARGET_ARCH xtensa)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(s390x)")
......
@@ -74,6 +74,7 @@ ncnn 目前已在腾讯多款应用中使用,如:QQ,Qzone,微信,天
[pass-ios-cpu]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/ios-cpu.yml?branch=master
[pass-ios-simulator]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/ios-simulator.yml?branch=master
[pass-ios-simulator]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/ios-simulator.yml?branch=master
[pass-ios-simulator-gpu]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/ios-simulator-gpu.yml?branch=master
[pass-linux-aarch64-cpu-gcc]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/linux-aarch64-cpu-gcc.yml?branch=master
[pass-linux-arm-cpu-gcc]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/linux-arm-cpu-gcc.yml?branch=master
[pass-linux-loongarch64-cpu-gcc]: https://img.shields.io/github/actions/workflow/status/Tencent/ncnn/linux-loongarch64-cpu-gcc.yml?branch=master
@@ -111,6 +112,7 @@ ncnn 目前已在腾讯多款应用中使用,如:QQ,Qzone,微信,天
[ci-ios-cpu]: https://github.com/Tencent/ncnn/actions?query=workflow%3Aios-cpu
[ci-ios-simulator]: https://github.com/Tencent/ncnn/actions?query=workflow%3Aios-simulator
[ci-ios-simulator]: https://github.com/Tencent/ncnn/actions?query=workflow%3Aios-simulator
[ci-ios-simulator-gpu]: https://github.com/Tencent/ncnn/actions?query=workflow%3Aios-simulator-gpu
[ci-linux-aarch64-cpu-gcc]: https://github.com/Tencent/ncnn/actions?query=workflow%3Alinux-aarch64-cpu-gcc
[ci-linux-arm-cpu-gcc]: https://github.com/Tencent/ncnn/actions?query=workflow%3Alinux-arm-cpu-gcc
[ci-linux-loongarch64-cpu-gcc]: https://github.com/Tencent/ncnn/actions?query=workflow%3Alinux-loongarch64-cpu-gcc
@@ -203,7 +205,7 @@ ncnn 目前已在腾讯多款应用中使用,如:QQ,Qzone,微信,天
**[how to build ncnn library](https://github.com/Tencent/ncnn/wiki/how-to-build) on Linux / Windows / macOS / Raspberry Pi3, Pi4 / Android / NVIDIA Jetson / iOS / WebAssembly / AllWinner D1 / Loongson 2K1000**
-- [Build for Linux / NVIDIA Jetson / Raspberry Pi3, Pi4](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux)
+- [Build for Linux / NVIDIA Jetson / Raspberry Pi3, Pi4 / POWER9](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux)
- [Build for Windows x64 using VS2017](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-windows-x64-using-visual-studio-community-2017)
- [Build for macOS](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-macos)
- [Build for ARM Cortex-A family with cross-compiling](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-arm-cortex-a-family-with-cross-compiling)
@@ -296,6 +298,7 @@ ncnn 目前已在腾讯多款应用中使用,如:QQ,Qzone,微信,天
| arm-gpu | ❔ | ❔ | ✔️ | / | / |
| apple-cpu | / | / | / | ✔️ | ✅ |
| apple-gpu | / | / | / | ✔️ | ✔️ |
| ibm-cpu | / | ✔️ | / | / | / |
---
......
# ncnn GLSL extension
## Rationale
Different GPUs support different feature sets: some support fp16 as the buffer storage type, some support fp16 as operand variables, and some older GPUs only support fp32.
When the GPU supports the `VK_KHR_16bit_storage` extension, we prefer fp16 as the storage type to minimize GPU memory bandwidth consumption. Otherwise, we use `packHalf2x16` and `unpackHalf2x16` from GLSL 4.2 to pack two fp32 values into one uint, reducing read and write bandwidth.
Similarly, when the GPU supports the `VK_KHR_shader_float16_int8` extension, we prefer fp16 as the arithmetic operand type to speed up computation, which usually doubles the speed. Otherwise, we use fp32.
To ensure the widest compatibility, the code for declaring a descriptor binding and loading data would have to be written like this
```c
#if NCNN_fp16_storage // GPU supports 16bit storage
layout (binding = 0) buffer blob { f16vec4 blob_data[]; };
#elif NCNN_fp16_packed // GPU supports GLSL 4.2
layout (binding = 0) buffer blob { uvec2 blob_data[]; };
#else // GPU only supports fp32
layout (binding = 0) buffer blob { vec4 blob_data[]; };
#endif
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
#if NCNN_fp16_storage && NCNN_fp16_arithmetic // GPU supports 16bit storage and shader float16
    f16vec4 x = blob_data[i];
#elif NCNN_fp16_storage // GPU supports 16bit storage but not shader float16
    vec4 x = vec4(blob_data[i]);
#elif NCNN_fp16_packed && NCNN_fp16_arithmetic // GPU supports GLSL 4.2 and shader float16
    f16vec4 x = f16vec4(unpackFloat2x16(blob_data[i].x), unpackFloat2x16(blob_data[i].y));
#elif NCNN_fp16_packed // GPU supports GLSL 4.2
    vec4 x = vec4(unpackHalf2x16(blob_data[i].x), unpackHalf2x16(blob_data[i].y));
#else // GPU only supports fp32
    vec4 x = blob_data[i];
#endif
}
```
As you can see, merely declaring the buffer type and reading a value consumes many lines of code, which is a maintenance nightmare. Therefore, ncnn adds more flexible data types and helper functions that shrink the code, improve readability, and automatically expand to the most efficient implementation for the feature level the GPU supports.
With the ncnn GLSL extension, the code above can be simplified to
```c
layout (binding = 0) buffer blob { sfpvec4 blob_data[]; };
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
    afpvec4 x = buffer_ld4(blob_data, i);
}
```
The ncnn GLSL extension provides the necessary data types for storage, arithmetic, and shared memory, together with load, store, and conversion functions for buffers and images. It also provides buffer and image copy functions that prevent precision loss when fp16 is used as the intermediate data type and avoid unnecessary `unpackHalf2x16` and `packHalf2x16` pairs.
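For example, a pure pass-through shader can copy at storage precision with a single helper call. A minimal sketch (the bindings and indexing here are illustrative):
```c
layout (binding = 0) readonly buffer a_blob { sfpvec4 a_blob_data[]; };
layout (binding = 1) writeonly buffer b_blob { sfpvec4 b_blob_data[]; };
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
    // buffer_cp4 copies storage-to-storage at full storage precision,
    // skipping the afpvec4 round trip and any pack/unpack pair
    buffer_cp4(b_blob_data, i, a_blob_data, i);
}
```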
# Entry points for compiling GLSL
The gpu.h header in the ncnn library exposes three API functions for compiling GLSL code into a SPIR-V binary. All of them understand the ncnn GLSL extension and take an Option switch that controls how the extension expands. The first two accept a raw GLSL code string as argument; the last one creates one of ncnn's existing built-in shaders.
```cpp
namespace ncnn {
// online SPIR-V compilation
NCNN_EXPORT int compile_spirv_module(const char* comp_string, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(const char* comp_data, int comp_data_size, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(int shader_type_index, const Option& opt, std::vector<uint32_t>& spirv);
} // namespace ncnn
```
## Compiling ncnn extended GLSL code directly
You can write shader code with the ncnn GLSL extension and compile it into SPIR-V using the ncnn functions. The compiled artifact is a standards-conforming SPIR-V binary that can be used directly to create a pipeline object through the Vulkan API
```cpp
static const char my_glsl_data[] = R"(
#version 450
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif
layout (binding = 0) readonly buffer a_blob { sfpvec4 a_blob_data[]; };
layout (binding = 1) writeonly buffer b_blob { sfpvec4 b_blob_data[]; };
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
    afpvec4 v = buffer_ld4(a_blob_data, i);
    v = v + 123;
    buffer_st4(b_blob_data, i, v);
}
)";
Option opt;
// you can control the Vulkan extension behavior,
// e.g. keep fp32 storage even when the GPU supports 16bit storage
opt.use_fp16_storage = false;
std::vector<uint32_t> spirv;
ncnn::compile_spirv_module(my_glsl_data, sizeof(my_glsl_data) - 1, opt, spirv);
// create the pipeline object later
// ncnn::Pipeline pipeline(vkdev);
// pipeline.set_local_size_xyz(64, 1, 1);
// pipeline.create(spirv.data(), spirv.size() * 4, specializations);
```
## ncnn built-in shaders
The indexes of ncnn's internal shaders are exposed in the `layer_shader_type.h` header and can be used when needed
```cpp
#include "layer_shader_type.h"
int shader_type_index = LayerShaderType::convert_ycbcr;
Option opt;
std::vector<uint32_t> spirv;
int retc = compile_spirv_module(shader_type_index, opt, spirv);
```
# Data types
## Storage type
Declare the buffer data layout in a descriptor binding
```c
layout (binding = 0) buffer top_blob { sfpvec4 top_blob_data[]; };
```
|storage type|fp32|fp16p|fp16s|
|---|---|---|---|
|sfp|float|float|float16_t|
|sfpvec2|vec2|uint|f16vec2|
|sfpvec4|vec4|uvec2|f16vec4|
|sfpvec8|mat2x4|uvec4|f16mat2x4|
## Arithmetic type
Declare local variables in GLSL code
```c
void main()
{
    afpvec4 v = a * b;
}
```
|arithmetic type|fp32|fp16a|
|---|---|---|
|afp|float|float16_t|
|afpvec2|vec2|f16vec2|
|afpvec4|vec4|f16vec4|
|afpvec8|mat2x4|f16mat2x4|
## Local type
Declare variables in shared local memory
```c
shared lfp tmp_a[8][4][2];
```
|local type|fp32|fp16p / fp16s|fp16s + fp16a|
|---|---|---|---|
|lfp|float|float|float16_t|
|lfpvec4|vec4|uvec2|f16vec4|
## Image format type and precision hint type
Declare the image format in a descriptor binding
```c
layout (binding = 0) uniform unfp sampler3D bottom_blob_3d;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob_3d;
```
|image format type|fp32|fp16p|fp16s|
|---|---|---|---|
|imfmtc1|r32f|r32f|r16f|
|imfmtc4|rgba32f|rgba16f|rgba16f|

|precision hint type|fp32|fp16p|fp16s|
|---|---|---|---|
|unfp|highp|mediump|mediump|
# Buffer functions
- Load a typed value from src[offset]
```c
afp buffer_ld1(sfp src, int offset);
afpvec2 buffer_ld2(sfpvec2 src, int offset);
afpvec4 buffer_ld4(sfpvec4 src, int offset);
afpvec8 buffer_ld8(sfpvec8 src, int offset);
```
- Store a typed value to dst[offset]
```c
void buffer_st1(sfp dst, int offset, afp v);
void buffer_st2(sfpvec2 dst, int offset, afpvec2 v);
void buffer_st4(sfpvec4 dst, int offset, afpvec4 v);
void buffer_st8(sfpvec8 dst, int offset, afpvec8 v);
```
- Copy a typed value from src[src_offset] to dst[dst_offset]
```c
void buffer_cp1(sfp dst, int dst_offset, sfp src, int src_offset);
void buffer_cp2(sfpvec2 dst, int dst_offset, sfpvec2 src, int src_offset);
void buffer_cp4(sfpvec4 dst, int dst_offset, sfpvec4 src, int src_offset);
void buffer_cp8(sfpvec8 dst, int dst_offset, sfpvec8 src, int src_offset);
```
- Copy and pack values from src[src_offsets[0]], src[src_offsets[1]], ... into dst[dst_offset]
```c
void buffer_cp1to4(sfpvec4 dst, int dst_offset, sfp src, ivec4 src_offsets);
void buffer_cp1to8(sfpvec8 dst, int dst_offset, sfp src, ivec4 src_offsets_0, ivec4 src_offsets_1);
void buffer_cp4to8(sfpvec8 dst, int dst_offset, sfpvec4 src, ivec2 src_offsets);
```
- Copy and unpack the value at src[src_offset] into dst[dst_offsets[0]], dst[dst_offsets[1]], ...
```c
void buffer_cp4to1(sfp dst, ivec4 dst_offsets, sfpvec4 src, int src_offset);
void buffer_cp8to1(sfp dst, ivec4 dst_offsets_0, ivec4 dst_offsets_1, sfpvec8 src, int src_offset);
void buffer_cp8to4(sfpvec4 dst, ivec2 dst_offsets, sfpvec8 src, int src_offset);
```
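Taken together, these helpers let layout repacking stay at storage precision. As an illustration, a hypothetical pack4-to-pack8 repacking shader might gather two pack4 elements into one pack8 element (a sketch; the bindings and indexing are assumptions):
```c
layout (binding = 0) readonly buffer a_blob { sfpvec4 a_blob_data[]; };
layout (binding = 1) writeonly buffer b_blob { sfpvec8 b_blob_data[]; };
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
    // gather two consecutive pack4 elements into one pack8 element,
    // without converting through the arithmetic type
    buffer_cp4to8(b_blob_data, i, a_blob_data, ivec2(i * 2, i * 2 + 1));
}
```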
# Image functions
- Load a typed value from the sampler?D src at pos
```c
afp image1d_ld1(sampler1D src, float pos);
afp image2d_ld1(sampler2D src, vec2 pos);
afp image3d_ld1(sampler3D src, vec3 pos);
afpvec4 image1d_ld4(sampler1D src, float pos);
afpvec4 image2d_ld4(sampler2D src, vec2 pos);
afpvec4 image3d_ld4(sampler3D src, vec3 pos);
afpvec8 image1d_ld8(sampler1D src, float pos);
afpvec8 image2d_ld8(sampler2D src, vec2 pos);
afpvec8 image3d_ld8(sampler3D src, vec3 pos);
```
- Store a typed value to the image?D dst at pos
```c
void image1d_st1(image1D dst, int pos, afp v);
void image2d_st1(image2D dst, ivec2 pos, afp v);
void image3d_st1(image3D dst, ivec3 pos, afp v);
void image1d_st4(image1D dst, int pos, afpvec4 v);
void image2d_st4(image2D dst, ivec2 pos, afpvec4 v);
void image3d_st4(image3D dst, ivec3 pos, afpvec4 v);
void image1d_st8(image1D dst, int pos, afpvec8 v);
void image2d_st8(image2D dst, ivec2 pos, afpvec8 v);
void image3d_st8(image3D dst, ivec3 pos, afpvec8 v);
```
- Copy a value from the sampler?D src at src_pos to the image?D dst at dst_pos
```c
void image1d_cp1(image1D dst, int dst_pos, sampler1D src, float src_pos);
void image2d_cp1(image2D dst, ivec2 dst_pos, sampler2D src, vec2 src_pos);
void image3d_cp1(image3D dst, ivec3 dst_pos, sampler3D src, vec3 src_pos);
void image1d_cp4(image1D dst, int dst_pos, sampler1D src, float src_pos);
void image2d_cp4(image2D dst, ivec2 dst_pos, sampler2D src, vec2 src_pos);
void image3d_cp4(image3D dst, ivec3 dst_pos, sampler3D src, vec3 src_pos);
void image1d_cp8(image1D dst, int dst_pos, sampler1D src, float src_pos);
void image2d_cp8(image2D dst, ivec2 dst_pos, sampler2D src, vec2 src_pos);
void image3d_cp8(image3D dst, ivec3 dst_pos, sampler3D src, vec3 src_pos);
```
Note: Since images are opaque data structures, no copy pack/unpack functions are provided. To achieve this, load first and then store.
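For example, a "copy with scaling" between a sampler and an image has to go through an arithmetic-type variable. A sketch based on the load/store functions above, reusing the binding declarations from the earlier example:
```c
layout (binding = 0) uniform unfp sampler3D bottom_blob_3d;
layout (binding = 1, imfmtc4) writeonly uniform unfp image3D top_blob_3d;
void main()
{
    const ivec3 gxyz = ivec3(gl_GlobalInvocationID);
    // load into an arithmetic-type variable, modify, then store
    afpvec4 v = image3d_ld4(bottom_blob_3d, vec3(gxyz));
    v = v * afp(2.f);
    image3d_st4(top_blob_3d, gxyz, v);
}
```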
# Local data conversion functions
- Convert from storage buffer to local memory
```c
lfp sfp2lfp(sfp v);
lfpvec4 sfp2lfpvec4(sfpvec4 v);
```
- Convert from local memory to a local variable
```c
afp lfp2afp(lfp v);
afpvec4 lfp2afpvec4(lfpvec4 v);
```
Note: The common usage of shared local memory is to read from global memory first and store it into local memory, then later read it back from local memory into local variables for subsequent use. Therefore, only storage-to-local and local-to-arithmetic conversion functions are provided here.
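Following that pattern, a tiled load through shared memory might look like this (a sketch; the tile size and binding are illustrative):
```c
layout (binding = 0) readonly buffer a_blob { sfp a_blob_data[]; };
shared lfp tmp_a[64];
void main()
{
    const int i = int(gl_GlobalInvocationID.x);
    const int lx = int(gl_LocalInvocationID.x);
    // global memory -> shared local memory, via the local type
    tmp_a[lx] = sfp2lfp(a_blob_data[i]);
    barrier();
    // shared local memory -> arithmetic-type local variable
    afp v = lfp2afp(tmp_a[lx]);
}
```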
# Misc functions
- Prefer specialization constants over push constants
```c
T psc(T x)
```
Declare the same variable in both the specialization constant and the push constant sections; `psc(x)` then becomes a compile-time constant when the specialization constant is given a non-zero value, and otherwise falls back to the dynamic push constant. This is typically used for tensor shape specialization: we can usually resolve all shape information and make it compile-time constant, which allows more aggressive shader optimization.
```c
layout (constant_id = 0) const int size = 0;
layout (push_constant) uniform parameter
{
    int size;
} p;
void main()
{
    const int s = psc(size);
}
```
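On the host side, the matching specialization constant is supplied when the pipeline is created, as in the earlier pipeline example. A sketch (assuming the `vk_specialization_type` union from gpu.h):
```cpp
std::vector<ncnn::vk_specialization_type> specializations(1);
specializations[0].i = 64; // shape resolved: psc(size) folds to the compile-time constant 64
// specializations[0].i = 0; // shape unknown: psc(size) reads the push constant p.size instead
// pipeline.create(spirv.data(), spirv.size() * 4, specializations);
```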
# Platform macros
Check whether the current platform is MoltenVK, to enable workarounds for platform-specific issues
```c
#if NCNN_moltenvk
// enable workaround for MoltenVK
#endif
```
# Conditional macros and their corresponding options
Enable GLSL extensions only when the user has enabled the corresponding options
```c
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif
```
Declare descriptor bindings as either images or buffers
```c
#if NCNN_image_shader
layout (binding = 0) uniform unfp sampler3D bottom_blob_3d;
#else
layout (binding = 0) readonly buffer bottom_blob { sfpvec4 bottom_blob_data[]; };
#endif
```
|macro|option field|
|---|---|
|NCNN_fp16_packed|opt.use_fp16_packed|
|NCNN_fp16_storage|opt.use_fp16_storage|
|NCNN_fp16_arithmetic|opt.use_fp16_arithmetic|
|NCNN_int8_packed|opt.use_int8_packed|
|NCNN_int8_storage|opt.use_int8_storage|
|NCNN_int8_arithmetic|opt.use_int8_arithmetic|
|NCNN_image_shader|opt.use_image_storage|
|NCNN_shader_local_memory|opt.use_shader_local_memory|
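On the host side, each macro follows the corresponding `Option` field at compile time. A sketch reusing `compile_spirv_module` and the shader string from the earlier example:
```cpp
ncnn::Option opt;
opt.use_fp16_storage = true;     // enables NCNN_fp16_storage in the shader
opt.use_fp16_arithmetic = false; // leaves NCNN_fp16_arithmetic disabled

std::vector<uint32_t> spirv;
ncnn::compile_spirv_module(my_glsl_data, sizeof(my_glsl_data) - 1, opt, spirv);
```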
@@ -287,7 +287,7 @@ Fully customizable op, first change to one that can export (e.g. concat slice),
First of all, you need to manage the memory you allocate yourself; at this point ncnn::Mat will not automatically free the float data you pass to it
``` c++
std::vector<float> testData(60, 1.0); // use std::vector<float> to manage memory requests and releases yourself
-ncnn::Mat in1(60, (void*)testData.data()).reshape(4, 5, 3); // just pass the pointer to the float data as a void*; you can even specify the dimensions (it's best to use reshape to handle the channel gap)
+ncnn::Mat in1 = ncnn::Mat(60, (void*)testData.data()).reshape(4, 5, 3); // just pass the pointer to the float data as a void*; you can even specify the dimensions (it's best to use reshape to handle the channel gap)
float* a = new float[60]; // allocate a block of memory with new; you need to release it yourself later
ncnn::Mat in2 = ncnn::Mat(60, (void*)a).reshape(4, 5, 3).clone(); // same usage as above; clone() copies the data so the Mat owns it
```
@@ -290,10 +290,10 @@ ex.extract("output2", out_2);
First of all, you need to manage the memory you allocate yourself; ncnn::Mat will not automatically free the float data you pass to it
``` c++
-std::vector<float> testData(60, 1.0); // use std::vector<float> to manage allocation and release yourself
-ncnn::Mat in1(60, (void*)testData.data()).reshape(4, 5, 3); // just pass the float data pointer as a void*; you can even specify the dimensions (it's best to use reshape to handle the channel gap)
-float* a = new float[60]; // allocate a block of memory with new; you need to release it yourself later
-ncnn::Mat in2 = ncnn::Mat(60, (void*)a).reshape(4, 5, 3).clone(); // same usage as above; clone() copies the data so the Mat owns it
+std::vector<float> testData(60, 1.0); // use std::vector<float> to manage allocation and release yourself
+ncnn::Mat in1 = ncnn::Mat(60, (void*)testData.data()).reshape(4, 5, 3); // just pass the float data pointer as a void*; you can even specify the dimensions (it's best to use reshape to handle the channel gap)
+float* a = new float[60]; // allocate a block of memory with new; you need to release it yourself later
+ncnn::Mat in2 = ncnn::Mat(60, (void*)a).reshape(4, 5, 3).clone(); // same usage as above; clone() copies the data so the Mat owns it
```
- ## How to initialize an ncnn::Mat to all zeros
......
@@ -10,6 +10,7 @@ git submodule update --init
- [Build for Linux](#build-for-linux)
- [Nvidia Jetson](#nvidia-jetson)
- [Raspberry Pi](#raspberry-pi)
- [POWER9](#power9)
- [Verification](#verification)
- [Build for Windows x64 using Visual Studio Community 2017](#build-for-windows-x64-using-visual-studio-community-2017)
- [Build for macOS](#build-for-macos)
@@ -87,6 +88,22 @@ You can add `-GNinja` to `cmake` above to use Ninja build system (invoke build u
For Raspberry Pi 3 on a 32-bit OS, add `-DCMAKE_TOOLCHAIN_FILE=../toolchains/pi3.toolchain.cmake` to cmake. You can also consider disabling Vulkan support, as the Vulkan drivers for Raspberry Pi are still not mature, though it doesn't hurt to build the support in and simply not use it.
#### POWER9
With Clang 13 or higher:
```shell
cd ncnn
mkdir -p build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../toolchains/power9le-linux-gnu-vsx.clang.toolchain.cmake -DNCNN_VULKAN=ON -DNCNN_BUILD_EXAMPLES=ON ..
make -j$(nproc)
```
Earlier versions of Clang may fail to build ncnn due to [Bug 49864](https://github.com/llvm/llvm-project/issues/49864). To use GCC instead, pass the `power9le-linux-gnu-vsx.toolchain.cmake` toolchain file. Note that, according to benchmarks, Clang appears to produce noticeably faster CPU inference than GCC for POWER9 targets.
Note that the POWER9 toolchain files only support little-endian mode.
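If no POWER9 hardware is available, the cross-compiled test suite can be run under qemu user-mode emulation, mirroring the CI workflow above. A sketch, assuming a build configured with `-DNCNN_BUILD_TESTS=ON` and Ubuntu's cross toolchain (the `-L` path comes with the `g++-powerpc64le-linux-gnu` package):
```shell
sudo apt-get install qemu-user g++-powerpc64le-linux-gnu
cd build
TESTS_EXECUTABLE_LOADER=qemu-ppc64le TESTS_EXECUTABLE_LOADER_ARGUMENTS="-L;/usr/powerpc64le-linux-gnu" ctest --output-on-failure
```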
#### Verification
Verify build by running some examples:
......
@@ -502,6 +502,13 @@ if(NCNN_TARGET_ARCH STREQUAL "riscv" AND NOT C906)
    endif()
endif()

if(NCNN_PPC64LE_VSX)
    # Auto-translate SSE4.1 to VSX if compiler is new enough.
    if(NCNN_SSE41)
        target_compile_options(ncnn PRIVATE -DNO_WARN_X86_INTRINSICS -D__SSE4_1__)
    endif()
endif()

if(NCNN_COVERAGE)
    target_compile_options(ncnn PUBLIC -coverage -fprofile-arcs -ftest-coverage)
    target_link_libraries(ncnn PUBLIC -coverage -lgcov)
......
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR powerpc64le)
set(CMAKE_C_COMPILER "clang")
set(CMAKE_CXX_COMPILER "clang++")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_C_FLAGS "-target powerpc64le-linux-gnu -I/usr/powerpc64le-linux-gnu/include -mcpu=power9 -mtune=power9 -DNO_WARN_X86_INTRINSICS -D__MMX__ -D__SSE__ -D__SSE2__ -D__SSSE3__")
set(CMAKE_CXX_FLAGS "-target powerpc64le-linux-gnu -I/usr/powerpc64le-linux-gnu/include -I/usr/powerpc64le-linux-gnu/include/c++/10/powerpc64le-linux-gnu -mcpu=power9 -mtune=power9 -DNO_WARN_X86_INTRINSICS -D__MMX__ -D__SSE__ -D__SSE2__ -D__SSSE3__")
# cache flags
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "c flags")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}" CACHE STRING "c++ flags")
# Auto-translate SSE to VSX
set(NCNN_PPC64LE_VSX ON)
......
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR powerpc64le)
set(CMAKE_C_COMPILER "powerpc64le-linux-gnu-gcc")
set(CMAKE_CXX_COMPILER "powerpc64le-linux-gnu-g++")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_C_FLAGS "-mcpu=power9 -mtune=power9 -DNO_WARN_X86_INTRINSICS -D__MMX__ -D__SSE__ -D__SSE2__ -D__SSSE3__")
set(CMAKE_CXX_FLAGS "-mcpu=power9 -mtune=power9 -DNO_WARN_X86_INTRINSICS -D__MMX__ -D__SSE__ -D__SSE2__ -D__SSSE3__")
# cache flags
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "c flags")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}" CACHE STRING "c++ flags")
# Auto-translate SSE to VSX
set(NCNN_PPC64LE_VSX ON)
@@ -47,7 +47,10 @@
static std::string get_basename(const std::string& path)
{
-    return path.substr(0, path.find_last_of('.'));
+    std::string base = path.substr(0, path.find_last_of('.'));
+    // sanitize -
+    std::replace(base.begin(), base.end(), '-', '_');
+    return base;
}
static void parse_string_list(char* s, std::vector<std::string>& list)
......
@@ -14,6 +14,8 @@
#include "eval_expression.h"
#include <fenv.h>
#include <float.h>
#include <math.h>
#include <iostream>
@@ -276,14 +278,10 @@ static std::string eval_expression(const Operator* op)
            if (t == "round")
            {
                // round to nearest even
-#if FLT_ROUNDS != FE_TONEAREST
                int old_rm = fegetround();
                fesetround(FE_TONEAREST);
-#endif
                float r = nearbyintf(af);
-#if FLT_ROUNDS != FE_TONEAREST
                fesetround(old_rm);
-#endif
                exprstack.push(std::to_string(r));
            }
            if (t == "rsqrt")
......
@@ -48,6 +48,13 @@ pnnx.Output output 1 0 out
        return "weighted_sum";
    }

    bool match(const std::map<std::string, const Operator*>& matched_operators) const
    {
        auto a_shape = matched_operators.at("op_0")->inputs[0]->shape;
        auto b_shape = matched_operators.at("op_1")->inputs[0]->shape;
        return !a_shape.empty() && a_shape == b_shape;
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        float c0 = 1.f;
@@ -93,6 +100,13 @@ pnnx.Output output 1 0 out
        return "weighted_sum";
    }

    bool match(const std::map<std::string, const Operator*>& matched_operators) const
    {
        auto a_shape = matched_operators.at("op_0")->inputs[0]->shape;
        auto b_shape = matched_operators.at("op_1")->inputs[1]->shape;
        return !a_shape.empty() && a_shape == b_shape;
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        float c0 = 1.f;
@@ -133,6 +147,13 @@ pnnx.Output output 1 0 out
        return "weighted_sum";
    }

    bool match(const std::map<std::string, const Operator*>& matched_operators) const
    {
        auto a_shape = matched_operators.at("op_1")->inputs[0]->shape;
        auto b_shape = matched_operators.at("op_0")->inputs[0]->shape;
        return !a_shape.empty() && a_shape == b_shape;
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        float c0 = 1.f;
......
@@ -138,6 +138,11 @@ void insert_reshape_numpy_binaryop_broadcast(Graph& graph)
            reshape0_shape.insert(reshape0_shape.begin(), 1);
        }

        if (batch_index0 != 233)
        {
            reshape0_shape.insert(reshape0_shape.begin() + batch_index0, 1);
        }

        reshape0->params["shape"] = reshape0_shape;

        break;
......