Commit 360937e9 authored by 叶剑武

Merge branch 'rm-benchmark-example' into 'master'

Remove out-dated code

See merge request !306
# Examples
load("//mace:mace.bzl", "if_android", "if_neon_enabled", "if_openmp_enabled")
# Minimal example binary showing how to link a MACE program.
cc_binary(
    name = "helloworld",
    srcs = ["helloworld.cc"],
    # Enable OpenMP linking only when the build requests it.
    linkopts = if_openmp_enabled(["-fopenmp"]),
    deps = [
        "//mace/core",
        "//mace/ops",
    ],
)
# Example micro-benchmark; built as a test target so it is not shipped.
cc_test(
    name = "benchmark_example",
    testonly = 1,
    srcs = ["benchmark_example.cc"],
    # Enable OpenMP linking only when the build requests it.
    linkopts = if_openmp_enabled(["-fopenmp"]),
    linkstatic = 1,
    deps = [
        "//mace/core",
        "//mace/core:test_benchmark_main",
    ],
)
load("//mace:mace.bzl", "if_openmp_enabled")
cc_binary(
name = "mace_run",
......@@ -31,7 +7,7 @@ cc_binary(
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
deps = [
"//mace/codegen:generated_models",
"//external:gflags_nothreads",
"//mace/codegen:generated_models",
],
)
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include <cstdint>
#include <vector>

#include "mace/core/testing/test_benchmark.h"
// Benchmark: element-wise multiply-by-2 over a fixed 32-element buffer.
// `iters` is the repetition count supplied by the benchmark framework.
static void foo(int iters) {
  static const int N = 32;
  const int64_t tot = static_cast<int64_t>(iters) * N;
  mace::testing::MaccProcessed(tot);
  mace::testing::BytesProcessed(tot * (sizeof(float)));
  // Use std::vector (RAII) instead of raw new/delete, and give the input a
  // defined value: the original read uninitialized heap memory, which is
  // undefined behavior and can skew timings (NaNs/denormals).
  std::vector<float> inp(N, 1.0f);
  std::vector<float> out(N, 0.0f);
  while (iters--) {
    for (int i = 0; i < N; i++) {
      out[i] = inp[i] * 2.0f;  // 2.0f avoids a float->double->float round trip
    }
  }
}
BENCHMARK(foo);
// Benchmark: element-wise multiply-by-2 over an n-element buffer.
// `iters` is the repetition count; `n` comes from BENCHMARK(...)->Arg(n).
static void bar(int iters, int n) {
  const int64_t tot = static_cast<int64_t>(iters) * n;
  mace::testing::MaccProcessed(tot);
  mace::testing::BytesProcessed(tot * (sizeof(float)));
  // Use std::vector (RAII) instead of raw new/delete, and give the input a
  // defined value: the original read uninitialized heap memory, which is
  // undefined behavior and can skew timings (NaNs/denormals).
  std::vector<float> inp(n, 1.0f);
  std::vector<float> out(n, 0.0f);
  while (iters--) {
    for (int i = 0; i < n; i++) {
      out[i] = inp[i] * 2.0f;  // 2.0f avoids a float->double->float round trip
    }
  }
}
BENCHMARK(bar)->Arg(32)->Arg(64)->Arg(128);
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/core/net.h"
#include "mace/core/runtime/opencl/opencl_runtime.h"
using namespace mace;
int main() {
// Construct graph
OperatorDef op_def_0;
op_def_0.add_input("Input");
op_def_0.add_output("Output0");
op_def_0.set_name("ReluTest0");
op_def_0.set_type("Relu");
auto arg_0 = op_def_0.add_arg();
arg_0->set_name("arg0");
arg_0->set_f(0.5);
OperatorDef op_def_1;
op_def_1.add_input("Input");
op_def_1.add_output("Output1");
op_def_1.set_name("ReluTest1");
op_def_1.set_type("Relu");
auto arg_1 = op_def_1.add_arg();
arg_1->set_name("arg0");
arg_1->set_f(1.5);
OperatorDef op_def_2;
op_def_2.add_input("Output0");
op_def_2.add_input("Output1");
op_def_2.add_output("Output2");
op_def_2.set_name("AddNTest");
op_def_2.set_type("AddN");
auto arg_2 = op_def_2.add_arg();
arg_2->set_name("arg0");
arg_2->set_f(2.5);
NetDef net_def;
net_def.set_name("NetTest");
net_def.add_op()->CopyFrom(op_def_0);
net_def.add_op()->CopyFrom(op_def_1);
net_def.add_op()->CopyFrom(op_def_2);
alignas(4) unsigned char tensor_data[] = "012345678901234567890123";
const std::vector<int64_t> dims = {1, 2, 3, 1};
ConstTensor input("Input", tensor_data, dims, DataType::DT_FLOAT);
net_def.mutable_tensors().push_back(input);
// Create workspace and input tensor
Workspace ws;
ws.LoadModelTensor(net_def, DeviceType::CPU);
// Create Net & run
auto net = CreateNet(net_def, &ws, DeviceType::CPU);
net->Run();
auto out_tensor = ws.GetTensor("Output2");
out_tensor->DebugPrint();
return 0;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册