Commit 597d9217 authored by dzhwinter

clean demo_ci

Parent dbd0075b
@@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <algorithm>
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/channel.h"
@@ -384,6 +386,7 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
     CreateVariables(ctx->prog_, local_scope, ctx->block_id_);
   }
+  VLOG(3) << "Scope ptr " << local_scope;
   for (auto& op : ctx->ops_) {
     op->Run(*local_scope, place_);
     // CheckResult(op->Type(), ctx, local_scope);
@@ -445,7 +448,11 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
         VLOG(3) << "after tensor copy";
         float sum = .0;
         for(size_t i=0; i < check.numel(); ++i) {
+          if(std::type_index(check.type()) == std::type_index(typeid(int64_t))) {
+            sum += static_cast<float>(check.data<int64_t>()[i]);
+          } else {
           sum += check.data<float>()[i];
+          }
         }
         VLOG(3) << "op " << op->Type() << " output var " << var_name << " sum " << sum;
......
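The int64_t/float branch added above (and mirrored in LoadCombineOp further down) is a quick debug checksum: copy the tensor to CPU, dispatch on the element type via std::type_index, and fold every element into one float. A self-contained sketch of the pattern, where DebugSum and its raw-buffer interface are illustrative stand-ins rather than Paddle API:

```cpp
// Minimal sketch of the debug-checksum pattern in this hunk; DebugSum and
// its raw-buffer interface are illustrative, not Paddle API.
#include <cstdint>
#include <iostream>
#include <typeindex>
#include <vector>

float DebugSum(const void* data, size_t numel, std::type_index dtype) {
  float sum = 0.f;
  if (dtype == std::type_index(typeid(int64_t))) {
    const int64_t* p = static_cast<const int64_t*>(data);
    for (size_t i = 0; i < numel; ++i) sum += static_cast<float>(p[i]);
  } else {
    // Like the diff's else branch, anything non-int64 is assumed to be float.
    const float* p = static_cast<const float*>(data);
    for (size_t i = 0; i < numel; ++i) sum += p[i];
  }
  return sum;
}

int main() {
  std::vector<int64_t> ids = {1, 2, 3};
  std::cout << DebugSum(ids.data(), ids.size(), typeid(int64_t)) << "\n";  // 6
}
```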
@@ -62,7 +62,7 @@ static DDim GetDims(const Scope& scope, const std::string& name,
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    if (UNLIKELY(!tensor.IsInitialized())) {
+    if (!tensor.IsInitialized()) {
       return DDim({-1});
     }
     return tensor.dims();
@@ -91,13 +91,13 @@ static std::string GetDtype(const Scope& scope, const std::string& name) {
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    if (UNLIKELY(!tensor.IsInitialized())) {
+    if (!tensor.IsInitialized()) {
       return "";
     }
     return DataTypeToString(ToDataType(tensor.type()));
   } else if (var->IsType<SelectedRows>()) {
     auto tensor = var->Get<SelectedRows>().value();
-    if (UNLIKELY(!tensor.IsInitialized())) {
+    if (!tensor.IsInitialized()) {
       return "uninited";
     } else {
       return DataTypeToString(ToDataType(tensor.type()));
@@ -130,7 +130,7 @@ static LoD GetLoD(const Scope& scope, const std::string& name) {
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    if (UNLIKELY(!tensor.IsInitialized())) {
+    if (!tensor.IsInitialized()) {
       return default_lod;
     }
     return tensor.lod();
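All three hunks above drop the UNLIKELY() wrapper from the initialization checks. Paddle's UNLIKELY expands to GCC's __builtin_expect, which MSVC does not provide, so removing it is presumably part of this branch's Windows work (note the hard-coded D:/Paddle paths being cleaned out of the CMake file below). A hedged sketch of a portable alternative:

```cpp
// Sketch of a portable branch hint; assumes the motivation for dropping
// UNLIKELY() is MSVC's lack of __builtin_expect.
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
#else
#define UNLIKELY(condition) (condition)  // MSVC et al.: no hint, same semantics
#endif

int main() {
  int value = -1;
  if (UNLIKELY(value < 0)) {
    std::puts("error path, hinted as cold where the compiler supports it");
  }
  return 0;
}
```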
@@ -206,6 +206,7 @@ const std::vector<std::string>& OperatorBase::Outputs(
 }
 std::string OperatorBase::DebugStringEx(const Scope* scope) const {
+  VLOG(3) << this->Type() << " scope ptr " << scope;
   std::stringstream ss;
   ss << "Op(" << type_ << "), inputs:{";
   for (auto it = inputs_.begin(); it != inputs_.end();) {
......
@@ -73,10 +73,11 @@ link_directories("${PADDLE_LIB}/paddle/fluid/inference")
 # add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
 # add_library(${DEMO_NAME} ${DEMO_NAME}.cc)
-add_library(${DEMO_NAME} SHARED ${DEMO_NAME}.cc)
 add_executable(real_data_icnet_tester real_data_icnet_tester.cc)
-add_executable(test test.cc)
-add_executable(thread_icnet_test thread_icnet_test.cc)
+# add_library(${DEMO_NAME} SHARED ${DEMO_NAME}.cc)
+# add_executable(test test.cc)
+# add_executable(thread_icnet_test thread_icnet_test.cc)
 if(WITH_MKL)
   include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
@@ -94,11 +95,7 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-    # ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}
-    D:/Paddle/bazel-dll/fluid_install_dir/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}
-    # E:/Paddle/build/paddle/fluid/inference/api/Release/libpaddle_inference_api${CMAKE_STATIC_LIBRARY_SUFFIX}
-    D:/Paddle/bazel-dll/paddle/fluid/inference/api/Release/libpaddle_inference_api${CMAKE_STATIC_LIBRARY_SUFFIX}
-  )
+    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   set(DEPS
     ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
@@ -129,10 +126,10 @@ if(WITH_GPU)
   endif()
 endif()
-target_link_libraries(${DEMO_NAME} ${DEPS})
-target_link_libraries(test ${DEMO_NAME} )
-target_link_libraries(thread_icnet_test ${DEPS})
 target_link_libraries(real_data_icnet_tester ${DEPS})
-target_compile_definitions(${DEMO_NAME} PRIVATE "API_DEFINITION")
+# target_link_libraries(${DEMO_NAME} ${DEPS})
+# target_link_libraries(test ${DEMO_NAME} )
+# target_link_libraries(thread_icnet_test ${DEPS})
+# target_compile_definitions(${DEMO_NAME} PRIVATE "API_DEFINITION")
@@ -19,6 +19,7 @@
 #include <iostream>
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+
 namespace paddle {
 // DEFINE_string(dirname, "./lb",
@@ -27,8 +28,8 @@ namespace paddle {
 NativeConfig GetConfig() {
   NativeConfig config;
   // config.model_dir = FLAGS_dirname;
-  config.prog_file= "lb/__model__";
-  config.param_file= "lb/__params__";
+  config.prog_file= "hs_lb_without_bn/__model__";
+  config.param_file= "hs_lb_without_bn/__params__";
   config.fraction_of_gpu_memory = 0.8;
   config.use_gpu = true;
   config.device = 0;
@@ -44,6 +45,7 @@ double time_diff(Time t1, Time t2) {
   return counter.count() / 1000.0;
 }
+
 void test_naive(int batch_size){
   NativeConfig config = GetConfig();
   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
@@ -88,10 +90,9 @@ void test_naive(int batch_size){
   PaddleTensor tensor_out;
   std::vector<PaddleTensor> outputs(1, tensor_out);
-  predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
+  // predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
   std::cout << "start predict123:" << std::endl;
   auto time1 = time();
   for(size_t i = 0; i < 1; i++) {
     predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
......
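For reference, the time()/time_diff() helpers this tester uses are presumably defined along these lines; the microseconds-to-milliseconds division matches the count() / 1000.0 in the hunk above. The clock getter is renamed now() here only to avoid colliding with ::time from <ctime> outside namespace paddle:

```cpp
#include <chrono>
#include <iostream>

using Time = std::chrono::high_resolution_clock::time_point;

Time now() { return std::chrono::high_resolution_clock::now(); }

// Elapsed wall time in milliseconds, as in the tester's time_diff().
double time_diff(Time t1, Time t2) {
  auto counter = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
  return counter.count() / 1000.0;
}

int main() {
  auto t1 = now();
  volatile double x = 0;
  for (int i = 0; i < 1000000; ++i) x += i;  // stand-in for predictor->Run()
  std::cout << "elapsed: " << time_diff(t1, now()) << " ms\n";
}
```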
@@ -42,6 +42,8 @@ class FetchOp : public framework::OperatorBase {
                    "Cannot find out_var in scope, out_var_name is %s",
                    out_name);
+    VLOG(3) << "fetch_var ptr " << fetch_var << " is " << (fetch_var == nullptr);
+    VLOG(3) << "out_var ptr " << out_var << " is " << (out_var == nullptr);
     auto col = static_cast<size_t>(Attr<int>("col"));
     auto *fetch_list = out_var->GetMutable<framework::FeedFetchList>();
......
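The VLOG(3) statements added throughout this commit are glog verbose logs: always compiled in, but silent unless verbosity is raised at runtime. A minimal sketch of surfacing them:

```cpp
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  // Verbosity comes from the --v=3 flag or the GLOG_v=3 environment variable;
  // GLOG_logtostderr=1 sends output to stderr instead of log files.
  VLOG(3) << "visible only when verbosity >= 3";
  LOG(INFO) << "emitted at the default severity";
  return 0;
}
```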
@@ -67,7 +67,11 @@ class LoadCombineOp : public framework::OperatorBase {
       framework::TensorCopy(*tensor, platform::CPUPlace(), dev_ctx, &check);
       float sum = .0;
       for(size_t i=0; i < check.numel(); ++i) {
+        if(std::type_index(check.type()) == std::type_index(typeid(int64_t))) {
+          sum += static_cast<float>(check.data<int64_t>()[i]);
+        } else {
         sum += check.data<float>()[i];
+        }
       }
       VLOG(3) << "sum result" << sum;
       auto in_dtype = framework::ToDataType(tensor->type());
......