Unverified commit 3e6c0843 authored by Santa An and committed by GitHub

[LITE][BM] modify interpolate if,test=develop (#3904)

* [LITE][BM] modify interpolate if,test=develop

* [LITE][BM] add include math.h, test=develop
Parent 1aef8141
......@@ -59,9 +59,9 @@ void TestModel(const std::vector<Place>& valid_places) {
}
auto* image_tensor = predictor.GetInput(1);
image_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 2})));
- data = image_tensor->mutable_data<float>();
- data[0] = FLAGS_im_height;
- data[1] = FLAGS_im_width;
+ auto* data_1 = image_tensor->mutable_data<int>();
+ data_1[0] = FLAGS_im_height;
+ data_1[1] = FLAGS_im_width;
for (int i = 0; i < FLAGS_warmup; ++i) {
predictor.Run();
......
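In the test above, input 1 of the predictor is the auxiliary image-size tensor; the change fills it with int32 values instead of float, presumably because the downstream BM bridge reads the original image height and width as integers. A minimal sketch of the same pattern, reusing the predictor and flags from the test (the variable name size_data is illustrative only):

    // Sketch: fill the {1, 2} image-size input as int32 rather than float.
    auto* image_tensor = predictor.GetInput(1);
    image_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 2})));
    auto* size_data = image_tensor->mutable_data<int>();  // int buffer, not float
    size_data[0] = FLAGS_im_height;  // original image height
    size_data[1] = FLAGS_im_width;   // original image width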
......@@ -13,6 +13,7 @@
// limitations under the License.
#include <bmcompiler_if.h>
+ #include <math.h>
#include "lite/kernels/bm/bridges/graph.h"
#include "lite/kernels/bm/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h"
......@@ -64,10 +65,16 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
auto* bias_data = bias->mutable_data<float>();
auto* mean_data = mean->mutable_data<float>();
auto* variance_data = variance->mutable_data<float>();
+ float* new_bias = static_cast<float*>(malloc(bias->memory_size()));
+ float* new_scale = static_cast<float*>(malloc(scale->memory_size()));
+ CHECK(new_bias != nullptr);
+ CHECK(new_scale != nullptr);
for (int c = 0; c < channel_size; c++) {
float inv_scale = 1.f / (std::sqrt(variance_data[c] + epsilon));
- bias_data[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
- scale_data[c] = inv_scale * scale_data[c];
+ new_bias[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
+ new_scale[c] = inv_scale * scale_data[c];
}
const int input_num = 1;
......@@ -86,11 +93,13 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
output_dims.size(),
static_cast<const char*>(output_var_name.c_str()),
static_cast<const char*>(unique_op_name.c_str()),
- static_cast<const float*>(scale->mutable_data<float>()),
- static_cast<const float*>(bias->mutable_data<float>()),
+ static_cast<const float*>(new_scale),
+ static_cast<const float*>(new_bias),
1,
1,
1);
+ free(new_scale);
+ free(new_bias);
delete[] shape;
delete[] name;
delete[] dim;
......
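For context, the batch-norm bridge folds the normalization into a per-channel affine transform: y = scale * (x - mean) / sqrt(variance + epsilon) + bias, which equals new_scale * x + new_bias with new_scale = scale / sqrt(variance + epsilon) and new_bias = bias - new_scale * mean. The hunk above now writes the folded values into freshly allocated buffers rather than overwriting scale_data and bias_data in place, so the original parameter tensors stay intact and the temporary buffers are released with free() once the layer has been added. A standalone sketch of the folding math (a hypothetical helper, not the bridge code itself):

    #include <cmath>
    #include <vector>

    // Fold batch-norm parameters into a per-channel scale/bias pair:
    //   y = scale * (x - mean) / sqrt(var + eps) + bias == new_scale * x + new_bias
    void FoldBatchNorm(const std::vector<float>& scale,
                       const std::vector<float>& bias,
                       const std::vector<float>& mean,
                       const std::vector<float>& variance,
                       float epsilon,
                       std::vector<float>* new_scale,
                       std::vector<float>* new_bias) {
      const size_t channel_size = scale.size();
      new_scale->resize(channel_size);
      new_bias->resize(channel_size);
      for (size_t c = 0; c < channel_size; ++c) {
        const float inv_std = 1.f / std::sqrt(variance[c] + epsilon);
        (*new_scale)[c] = inv_std * scale[c];
        (*new_bias)[c] = bias[c] - inv_std * scale[c] * mean[c];
      }
    }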
......@@ -13,6 +13,7 @@
// limitations under the License.
#include <bmcompiler_if.h>
+ #include <math.h>
#include "lite/kernels/bm/bridges/graph.h"
#include "lite/kernels/bm/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h"
......
......@@ -76,6 +76,8 @@ int InterpolateConverter(void* ctx, OpLite* op, KernelBase* kernel) {
static_cast<const char*>(output_var_name.c_str()),
0,
0,
+ 0,
+ 0,
type);
}
graph->AddNode(output_var_name);
......
......@@ -76,9 +76,10 @@ bool SubgraphEngine::BuildDeviceProgram() {
return false;
}
}
std::string net_name = "bmnetc_f32umodel";
std::string net_name = "bmnet_f32bmodel";
auto unique_net_name = lite::subgraph::bm::UniqueName(net_name);
__bmcompile_opt(
- graph.GetCompilerHandle(), const_cast<char*>(net_name.c_str()), 1);
+ graph.GetCompilerHandle(), const_cast<char*>(unique_net_name.c_str()), 2);
void* bmodel_data = nullptr;
unsigned int data_size = 0;
bm_hd_ = static_cast<bm_handle_t>(ctx.GetHandle());
......
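The compiled network also gets a unique name before __bmcompile_opt is invoked, so a model that contains several BM subgraphs does not register two bmodels under the same name. UniqueName comes from the BM bridge utilities; a rough illustration of what such a helper typically does (a counter-suffixed name, shown here as an assumption about its behavior, not the actual implementation):

    #include <string>
    #include <unordered_map>

    // Illustration: returns "base", then "base_1", "base_2", ... on repeated calls.
    std::string UniqueName(const std::string& base) {
      static std::unordered_map<std::string, int> counters;
      int& count = counters[base];
      std::string name = (count == 0) ? base : base + "_" + std::to_string(count);
      ++count;
      return name;
    }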
......@@ -43,7 +43,7 @@ function prepare_thirdparty {
# clone bmlibs
if [ ! -d ${workspace}/third-party/bmlibs ]; then
git clone https://github.com/AnBaolei1984/bmlibs.git ${workspace}/third-party/bmlibs
fi
fi
}
# for code gen, a source file is generated after a test, but is depended on by some targets in cmake.
......@@ -70,6 +70,13 @@ function build_bm {
mkdir -p $build_dir
cd $build_dir
+ if [ $TARGET_NAME == "BM1684" ]; then
+   BM_SDK_ROOT="$workspace/third-party/bmlibs/bm_sc5_libs"
+ else
+   BM_SDK_ROOT="$workspace/third-party/bmlibs/bm_sc3_libs"
+ fi
+ echo $BM_SDK_ROOT
prepare_workspace
cmake .. \
${CMAKE_COMMON_OPTIONS} \
......@@ -95,17 +102,7 @@ function main {
case $i in
--target_name=*)
TARGET_NAME="${i#*=}"
shift
;;
#--bm_sdk_root=*)
# BM_SDK_ROOT="${i#*=}"
# shift
# ;;
bm)
build_bm
shift
;;
*)
# unknown option
print_usage
exit 1
......