Unverified commit 8a6b4640, authored by Tao Luo, committed by GitHub

Merge pull request #13295 from luotao1/anakin_ner

refine anakin related codes
@@ -48,7 +48,7 @@ ExternalProject_Add(
     ${EXTERNAL_PROJECT_LOG_ARGS}
     DEPENDS ${MKLML_PROJECT}
     GIT_REPOSITORY "https://github.com/PaddlePaddle/Anakin"
-    GIT_TAG "9424277cf9ae180a14aff09560d3cd60a49c76d2"
+    GIT_TAG "3c8554f4978628183566ab7dd6c1e7e66493c7cd"
     PREFIX ${ANAKIN_SOURCE_DIR}
     UPDATE_COMMAND ""
     CMAKE_ARGS ${CMAKE_ARGS_PREFIX}
......
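Note: pinning GIT_TAG to a full commit hash rather than a branch name keeps the external Anakin build reproducible; this change simply moves the pin to a newer upstream commit.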
@@ -150,7 +150,7 @@ if (WITH_ANAKIN AND WITH_MKL)
         SRCS
         ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
         ${ANAKIN_INSTALL_DIR} # anakin release
-        DSTS ${dst_dir}/inference/anakin ${dst_dir}/inference/anakin)
+        DSTS ${dst_dir}/inference/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
     list(APPEND inference_deps anakin_inference_lib)
 endif()
......
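Note: the old DSTS line pointed both destinations at the same ${dst_dir}/inference/anakin directory, so the compiled API library and the Anakin release were copied into one place; the new second destination gives the Anakin release its own path, presumably to match the ${FLUID_INSTALL_DIR}/third_party/install/<dep> layout used for the other third-party dependencies.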
@@ -20,71 +20,16 @@ limitations under the License. */
 #include <iostream>
 #include <thread>  // NOLINT
 #include <vector>
 #include "framework/core/net/net.h"
+#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/timer.h"
 #include "utils/logger/logger.h"
 DEFINE_string(model, "", "Directory of the inference model.");
 DEFINE_string(datapath, "", "Path of the dataset.");
 DEFINE_int32(batch_size, 1, "batch size.");
 DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
-// Timer for timer
-class Timer {
- public:
-  double start;
-  double startu;
-  void tic() {
-    struct timeval tp;
-    gettimeofday(&tp, NULL);
-    start = tp.tv_sec;
-    startu = tp.tv_usec;
-  }
-  double toc() {
-    struct timeval tp;
-    gettimeofday(&tp, NULL);
-    double used_time_ms =
-        (tp.tv_sec - start) * 1000.0 + (tp.tv_usec - startu) / 1000.0;
-    return used_time_ms;
-  }
-};
-std::vector<std::string> string_split(std::string in_str,
-                                      std::string delimiter) {
-  std::vector<std::string> seq;
-  int found = in_str.find(delimiter);
-  int pre_found = -1;
-  while (found != std::string::npos) {
-    if (pre_found == -1) {
-      seq.push_back(in_str.substr(0, found));
-    } else {
-      seq.push_back(in_str.substr(pre_found + delimiter.length(),
-                                  found - delimiter.length() - pre_found));
-    }
-    pre_found = found;
-    found = in_str.find(delimiter, pre_found + delimiter.length());
-  }
-  seq.push_back(
-      in_str.substr(pre_found + 1, in_str.length() - (pre_found + 1)));
-  return seq;
-}
-std::vector<std::string> string_split(
-    std::string in_str, std::vector<std::string>& delimiter) {  // NOLINT
-  std::vector<std::string> in;
-  std::vector<std::string> out;
-  out.push_back(in_str);
-  for (auto del : delimiter) {
-    in = out;
-    out.clear();
-    for (auto s : in) {
-      auto out_s = string_split(s, del);
-      for (auto o : out_s) {
-        out.push_back(o);
-      }
-    }
-  }
-  return out;
-}
 class Data {
  public:
   Data(std::string file_name, int batch_size)
@@ -120,36 +65,24 @@ void Data::get_batch_data(
   week_fea.clear();
   time_fea.clear();
   while (_file.getline(buf, 10000)) {
-    std::string s = buf;
-    std::vector<std::string> deli_vec = {":"};
-    std::vector<std::string> data_vec = string_split(s, deli_vec);
+    std::vector<std::string> data_vec;
+    paddle::inference::split(buf, ':', &data_vec);
     std::vector<std::string> seq;
-    seq = string_split(data_vec[0], {"|"});
+    paddle::inference::split(data_vec[0], '|', &seq);
     for (auto link : seq) {
-      std::vector<std::string> data = string_split(link, ",");
       std::vector<float> vec;
-      for (int i = 0; i < data.size(); i++) {
-        vec.push_back(atof(data[i].c_str()));
-      }
+      paddle::inference::split_to_float(link, ',', &vec);
       fea.push_back(vec);
     }
-    std::vector<std::string> week_data;
-    std::vector<std::string> time_data;
-    week_data = string_split(data_vec[2], ",");
     std::vector<float> vec_w;
-    for (int i = 0; i < week_data.size(); i++) {
-      vec_w.push_back(atof(week_data[i].c_str()));
-    }
+    paddle::inference::split_to_float(data_vec[2], ',', &vec_w);
     week_fea.push_back(vec_w);
-    time_data = string_split(data_vec[1], ",");
     std::vector<float> vec_t;
-    for (int i = 0; i < time_data.size(); i++) {
-      vec_t.push_back(atof(time_data[i].c_str()));
-    }
+    paddle::inference::split_to_float(data_vec[1], ',', &vec_t);
     time_fea.push_back(vec_t);
     cum += seq.size();
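The paddle::inference::split and paddle::inference::split_to_float helpers used above come from paddle/fluid/inference/api/helper.h and replace the hand-rolled string_split deleted earlier in this file. As a rough sketch of the contract they are assumed to satisfy (the upstream bodies may differ in detail):

#include <string>
#include <vector>

// Splits `str` on `sep`; an empty trailing piece is assumed to be dropped.
static void split(const std::string &str, char sep,
                  std::vector<std::string> *pieces) {
  pieces->clear();
  if (str.empty()) return;
  size_t pos = 0;
  size_t next = str.find(sep, pos);
  while (next != std::string::npos) {
    pieces->push_back(str.substr(pos, next - pos));
    pos = next + 1;
    next = str.find(sep, pos);
  }
  if (!str.substr(pos).empty()) {
    pieces->push_back(str.substr(pos));
  }
}

// Splits on `sep` and parses each piece as a float, replacing the
// string_split + atof loops in the old code.
static void split_to_float(const std::string &str, char sep,
                           std::vector<float> *fs) {
  std::vector<std::string> pieces;
  split(str, sep, &pieces);
  for (const auto &p : pieces) {
    fs->push_back(std::stof(p));
  }
}

One behavioral difference worth noting: std::stof throws on malformed input, whereas the old atof silently returned 0.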
@@ -275,14 +208,13 @@ void single_test() {
   inputs.push_back(tensor_2);
   inputs.push_back(tensor_0);
-  Timer timer;
+  paddle::inference::Timer timer;
   timer.tic();
   for (int i = 0; i < FLAGS_repeat; i++) predictor->Run(inputs, &outputs);
-  LOG(INFO) << "batch_size = " << FLAGS_batch_size
-            << ", repeat = " << FLAGS_repeat
-            << ", sequence_length = " << seq_offset[seq_offset.size() - 1]
-            << ", latency: " << timer.toc() / FLAGS_repeat << "ms";
+  paddle::inference::PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0,
+                               timer.toc() / FLAGS_repeat);
+  LOG(INFO) << "sequence_length = " << seq_offset[seq_offset.size() - 1];
   float* data_o = static_cast<float*>(outputs[0].data.data());
   VLOG(3) << "outputs[0].data.length() = " << outputs[0].data.length();
......
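paddle::inference::Timer (from the newly included paddle/fluid/inference/api/timer.h) takes over from the gettimeofday-based Timer deleted above while keeping the same tic()/toc() interface, and PrintTime standardizes the latency log line across the inference tests. A minimal std::chrono-based sketch of the assumed Timer interface, not the upstream source:

#include <chrono>

class Timer {
 public:
  // Mark the start of the timed region.
  void tic() { start_ = std::chrono::high_resolution_clock::now(); }
  // Return milliseconds elapsed since the last tic().
  double toc() {
    auto end = std::chrono::high_resolution_clock::now();
    return std::chrono::duration<double, std::milli>(end - start_).count();
  }

 private:
  std::chrono::high_resolution_clock::time_point start_;
};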
@@ -547,7 +547,7 @@ function gen_capi_package() {
         rm -rf $install_prefix
         make DESTDIR="$install_prefix" install
         cd $install_prefix/usr/local
-        ls | egrep -v "^Found.*item$" | xargs tar -cf ${PADDLE_ROOT}/build/paddle.tgz
+        ls | egrep -v "^Found.*item$" | xargs tar -czf ${PADDLE_ROOT}/build/paddle.tgz
     fi
 }
......
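Note: the archive produced by gen_capi_package has always been named paddle.tgz, but tar -cf creates an uncompressed archive; switching to -czf makes the output actually gzip-compressed, so the file matches its extension.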
@@ -4500,7 +4500,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
     """
     if not (isinstance(shape, list) or isinstance(shape, tuple)):
-        raise ValueError("Input shape must be a python lsit or tuple.")
+        raise ValueError("Input shape must be a python list or tuple.")
     inputs = {"X": x}
     if isinstance(actual_shape, Variable):
         inputs["Shape"] = actual_shape
......