Commit 01f955ec authored by guru4elephant

refine python API folder structure

separate client and server: we may support multiple client versions, but there can be only one server version
Parent c928fcd1
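After this change the former paddle_serving package ships as two wheels. A hedged sketch of the resulting import layout (package names come from the setup.py changes below; exporting Client, OpMaker and Server at the package top level is an assumption):

from paddle_serving_client import Client           # client-side prediction API (assumed export)
from paddle_serving_client.io import save_model    # model/config export helper (package listed in setup.py below)
from paddle_serving_server import OpMaker, Server  # server-side configuration API (assumed export)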
@@ -137,4 +137,4 @@ add_subdirectory(python)
 set(PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE})
 set(PYTHON_LIBRARIES ${PYTHON_LIB})
-add_subdirectory(examples)
+#add_subdirectory(examples)
-engines {
-  name: "image_classification_resnet"
-  type: "FLUID_CPU_ANALYSIS_DIR"
-  reloadable_meta: "./data/model/paddle/fluid_time_file"
-  reloadable_type: "timestamp_ne"
-  model_data_path: "./data/model/paddle/fluid/SE_ResNeXt50_32x4d"
-  runtime_thread_num: 0
-  batch_infer_size: 0
-  enable_batch_align: 0
-  enable_memory_optimization: true
-  static_optimization: false
-  force_update_static_cache: false
-}
 engines {
   name: "general_model"
   type: "FLUID_CPU_ANALYSIS_DIR"
@@ -22,39 +8,3 @@ engines {
   batch_infer_size: 0
   enable_batch_align: 0
 }
-engines {
-  name: "text_classification_bow"
-  type: "FLUID_CPU_ANALYSIS_DIR"
-  reloadable_meta: "./data/model/paddle/fluid_time_file"
-  reloadable_type: "timestamp_ne"
-  model_data_path: "./data/model/paddle/fluid/text_classification_lstm"
-  runtime_thread_num: 0
-  batch_infer_size: 0
-  enable_batch_align: 0
-}
-engines {
-  name: "ctr_prediction"
-  type: "FLUID_CPU_ANALYSIS_DIR"
-  reloadable_meta: "./data/model/paddle/fluid_time_file"
-  reloadable_type: "timestamp_ne"
-  model_data_path: "./data/model/paddle/fluid/ctr_prediction"
-  runtime_thread_num: 0
-  batch_infer_size: 0
-  enable_batch_align: 0
-  sparse_param_service_type: REMOTE
-  sparse_param_service_table_name: "test_dict"
-}
-engines {
-  name: "bert"
-  type: "FLUID_CPU_ANALYSIS_DIR"
-  reloadable_meta: "./data/model/paddle/fluid_time_file"
-  reloadable_type: "timestamp_ne"
-  model_data_path: "./data/model/paddle/fluid/bert_cased_L-12_H-768_A-12"
-  runtime_thread_num: 0
-  batch_infer_size: 0
-  enable_batch_align: 0
-  enable_memory_optimization: true
-}
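The deleted demo engines above were hand-written; the one surviving general_model engine is now meant to be emitted by the Python Server class (see _prepare_engine further down). A minimal sketch of generating the same prototxt, assuming the server_configure_pb2 module path and that EngineDesc/ModelToolkitConf carry exactly the fields shown in this diff:

import google.protobuf.text_format
from paddle_serving_server.proto import server_configure_pb2 as server_sdk  # assumed path

model_toolkit_conf = server_sdk.ModelToolkitConf()
engine = server_sdk.EngineDesc()
engine.name = "general_model"
engine.type = "FLUID_CPU_ANALYSIS_DIR"
engine.reloadable_meta = "./model/fluid_time_file"  # placeholder model path
engine.reloadable_type = "timestamp_ne"
engine.model_data_path = "./model"
engine.runtime_thread_num = 0
engine.batch_infer_size = 0
engine.enable_batch_align = 0
model_toolkit_conf.engines.extend([engine])
with open("model_toolkit.prototxt", "w") as fout:
    fout.write(str(model_toolkit_conf))  # str() of a message is the text format shown above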
 port: 9292
-services {
-  name: "BuiltinDenseFormatService"
-  workflows: "workflow1"
-}
-services {
-  name: "BuiltinSparseFormatService"
-  workflows: "workflow2"
-}
-services {
-  name: "BuiltinTestEchoService"
-  workflows: "workflow3"
-}
-services {
-  name: "ImageClassifyService"
-  workflows: "workflow4"
-}
-services {
-  name: "BuiltinFluidService"
-  workflows: "workflow5"
-}
-services {
-  name: "TextClassificationService"
-  workflows: "workflow6"
-}
-services {
-  name: "EchoKVDBService"
-  workflows: "workflow7"
-}
-services {
-  name: "CTRPredictionService"
-  workflows: "workflow8"
-}
-services {
-  name: "BertService"
-  workflows: "workflow9"
-}
 services {
   name: "GeneralModelService"
   workflows: "workflow11"
......
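These service definitions are protobuf text format, so they round-trip through google.protobuf.text_format exactly like the new Python code later in this commit. A sketch of reading the trimmed file back (InferServiceConf as the message name is an assumption inferred from the server_sdk usage below):

import google.protobuf.text_format
from paddle_serving_server.proto import server_configure_pb2 as server_sdk  # assumed path

infer_service_conf = server_sdk.InferServiceConf()  # assumed message name
with open("infer_service.prototxt") as f:
    google.protobuf.text_format.Merge(f.read(), infer_service_conf)
print(infer_service_conf.port)                        # 9292
print([s.name for s in infer_service_conf.services])  # ['GeneralModelService']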
-workflows {
-  name: "workflow1"
-  workflow_type: "Sequence"
-  nodes {
-    name: "dense_echo_op"
-    type: "DenseEchoOp"
-  }
-}
-workflows {
-  name: "workflow2"
-  workflow_type: "Sequence"
-  nodes {
-    name: "sparse_echo_op"
-    type: "SparseEchoOp"
-    dependencies {
-      name: "startup_op"
-      mode: "RO"
-    }
-  }
-}
-workflows {
-  name: "workflow3"
-  workflow_type: "Sequence"
-  nodes {
-    name: "echo_op"
-    type: "CommonEchoOp"
-  }
-}
-workflows {
-  name: "workflow4"
-  workflow_type: "Sequence"
-  nodes {
-    name: "image_reader_op"
-    type: "ReaderOp"
-  }
-  nodes {
-    name: "image_classify_op"
-    type: "ClassifyOp"
-    dependencies {
-      name: "image_reader_op"
-      mode: "RO"
-    }
-  }
-  nodes {
-    name: "write_json_op"
-    type: "WriteJsonOp"
-    dependencies {
-      name: "image_classify_op"
-      mode: "RO"
-    }
-  }
-}
-workflows {
-  name: "workflow5"
-  workflow_type: "Sequence"
-  nodes {
-    name: "int64tensor_echo_op"
-    type: "Int64TensorEchoOp"
-  }
-}
-workflows {
-  name: "workflow6"
-  workflow_type: "Sequence"
-  nodes {
-    name: "text_classify_op"
-    type: "TextClassificationOp"
-  }
-}
-workflows {
-  name: "workflow7"
-  workflow_type: "Sequence"
-  nodes {
-    name: "echo_kvdb_service_op"
-    type: "KVDBEchoOp"
-  }
-}
-workflows {
-  name: "workflow8"
-  workflow_type: "Sequence"
-  nodes {
-    name: "ctr_prediction_service_op"
-    type: "CTRPredictionOp"
-  }
-}
-workflows {
-  name: "workflow9"
-  workflow_type: "Sequence"
-  nodes {
-    name: "bert_service_op"
-    type: "BertServiceOp"
-  }
-}
-workflows {
-  name: "workflow10"
-  workflow_type: "Sequence"
-  nodes {
-    name: "general_model_op"
-    type: "GeneralModelOp"
-  }
-}
 workflows {
   name: "workflow11"
   workflow_type: "Sequence"
......
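All ten demo workflows go away; only the general-model workflow survives, and it too is meant to be produced from Python. A hedged sketch of building an equivalent workflow config and handing it to the new Server.set_op_sequence (WorkflowConf/Workflow message names are assumptions based on this prototxt schema):

from paddle_serving_server import Server  # assumed top-level export
from paddle_serving_server.proto import server_configure_pb2 as server_sdk  # assumed path

workflow_conf = server_sdk.WorkflowConf()  # assumed message name
workflow = workflow_conf.workflows.add()
workflow.name = "workflow11"
workflow.workflow_type = "Sequence"
node = workflow.nodes.add()
node.name = "general_model_op"
node.type = "GeneralModelOp"

server = Server()
server.set_op_sequence(workflow_conf)  # stored as workflow_conf and later written to workflow.prototxt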
13d73780-de4f-4b8c-9040-34e5adc9f9ae
# This is a RocksDB option file.
#
# For detailed file format spec, please refer to the example file
# in examples/rocksdb_option_file_example.ini
#
[Version]
rocksdb_version=6.2.4
options_file_version=1.1
[DBOptions]
avoid_unnecessary_blocking_io=false
allow_mmap_reads=false
allow_fallocate=true
WAL_size_limit_MB=0
writable_file_max_buffer_size=1048576
allow_mmap_writes=false
allow_concurrent_memtable_write=true
use_direct_reads=false
max_open_files=-1
strict_bytes_per_sync=false
db_write_buffer_size=0
max_background_jobs=2
WAL_ttl_seconds=0
enable_thread_tracking=false
error_if_exists=false
is_fd_close_on_exec=true
recycle_log_file_num=0
max_manifest_file_size=1073741824
skip_log_error_on_recovery=false
skip_stats_update_on_db_open=false
max_total_wal_size=0
new_table_reader_for_compaction_inputs=false
manual_wal_flush=false
compaction_readahead_size=0
atomic_flush=false
random_access_max_buffer_size=1048576
create_missing_column_families=false
wal_bytes_per_sync=0
use_adaptive_mutex=false
use_direct_io_for_flush_and_compaction=false
max_background_compactions=-1
advise_random_on_open=true
base_background_compactions=-1
max_background_flushes=-1
two_write_queues=false
table_cache_numshardbits=6
keep_log_file_num=1000
write_thread_slow_yield_usec=3
stats_dump_period_sec=600
avoid_flush_during_recovery=false
log_file_time_to_roll=0
delayed_write_rate=16777216
manifest_preallocation_size=4194304
paranoid_checks=true
max_log_file_size=0
allow_2pc=false
wal_dir=kvdb
db_log_dir=
max_subcompactions=1
create_if_missing=true
enable_pipelined_write=false
bytes_per_sync=0
stats_persist_period_sec=600
stats_history_buffer_size=1048576
fail_if_options_file_error=false
use_fsync=false
wal_recovery_mode=kPointInTimeRecovery
delete_obsolete_files_period_micros=21600000000
enable_write_thread_adaptive_yield=true
avoid_flush_during_shutdown=false
write_thread_max_yield_usec=100
info_log_level=INFO_LEVEL
max_file_opening_threads=16
dump_malloc_stats=false
allow_ingest_behind=false
access_hint_on_compaction_start=NORMAL
preserve_deletes=false
[CFOptions "default"]
sample_for_compression=0
compaction_pri=kMinOverlappingRatio
merge_operator=nullptr
compaction_filter_factory=nullptr
memtable_factory=SkipListFactory
memtable_insert_with_hint_prefix_extractor=nullptr
comparator=leveldb.BytewiseComparator
target_file_size_base=67108864
max_sequential_skip_in_iterations=8
compaction_style=kCompactionStyleLevel
max_bytes_for_level_base=268435456
bloom_locality=0
write_buffer_size=67108864
compression_per_level=
memtable_huge_page_size=0
max_successive_merges=0
arena_block_size=8388608
memtable_whole_key_filtering=false
target_file_size_multiplier=1
max_bytes_for_level_multiplier_additional=1:1:1:1:1:1:1
snap_refresh_nanos=0
num_levels=7
min_write_buffer_number_to_merge=1
max_write_buffer_number_to_maintain=0
max_write_buffer_number=2
compression=kNoCompression
level0_stop_writes_trigger=36
level0_slowdown_writes_trigger=20
compaction_filter=nullptr
level0_file_num_compaction_trigger=4
max_compaction_bytes=1677721600
compaction_options_universal={allow_trivial_move=false;size_ratio=1;min_merge_width=2;max_size_amplification_percent=200;max_merge_width=4294967295;compression_size_percent=-1;stop_style=kCompactionStopStyleTotalSize;}
memtable_prefix_bloom_size_ratio=0.000000
hard_pending_compaction_bytes_limit=274877906944
ttl=0
table_factory=BlockBasedTable
soft_pending_compaction_bytes_limit=68719476736
prefix_extractor=nullptr
bottommost_compression=kDisableCompressionOption
force_consistency_checks=false
paranoid_file_checks=false
compaction_options_fifo={allow_compaction=false;max_table_files_size=1073741824;}
max_bytes_for_level_multiplier=10.000000
optimize_filters_for_hits=false
level_compaction_dynamic_level_bytes=false
inplace_update_num_locks=10000
inplace_update_support=false
periodic_compaction_seconds=0
disable_auto_compactions=false
report_bg_io_stats=false
[TableOptions/BlockBasedTable "default"]
pin_top_level_index_and_filter=true
enable_index_compression=true
read_amp_bytes_per_bit=8589934592
format_version=2
block_align=false
metadata_block_size=4096
block_size_deviation=10
partition_filters=false
block_size=4096
index_block_restart_interval=1
no_block_cache=false
checksum=kCRC32c
whole_key_filtering=true
index_shortening=kShortenSeparators
data_block_index_type=kDataBlockBinarySearch
index_type=kBinarySearch
verify_compression=false
filter_policy=nullptr
data_block_hash_table_util_ratio=0.750000
pin_l0_filter_and_index_blocks_in_cache=false
block_restart_interval=16
cache_index_and_filter_blocks_with_high_priority=false
cache_index_and_filter_blocks=false
hash_index_allow_collision=true
flush_block_policy_factory=FlushBlockBySizePolicyFactory
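The kvdb files in this diff (a bare IDENTITY UUID above and this RocksDB OPTIONS dump) are runtime artifacts rather than source. The OPTIONS format is INI-like, so a quick inspection sketch only needs the standard library (the on-disk name is normally OPTIONS-<number>, so the path here is a placeholder):

import configparser

# RocksDB OPTIONS dumps are INI-style; section names such as
# 'CFOptions "default"' are read back verbatim by configparser.
parser = configparser.ConfigParser()
parser.read("OPTIONS")  # placeholder path to a RocksDB options dump
print(parser['DBOptions']['wal_dir'])                      # kvdb
print(parser['CFOptions "default"']['write_buffer_size'])  # 67108864 (64 MB)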
@@ -51,14 +51,14 @@ int GeneralModelOp::inference() {
   // infer
   if (batch_size > 0) {
     int var_num = req->insts(0).tensor_array_size();
-    VLOG(3) << "var num: " << var_num;
+    VLOG(2) << "var num: " << var_num;
     elem_type.resize(var_num);
     elem_size.resize(var_num);
     capacity.resize(var_num);
     paddle::PaddleTensor lod_tensor;
     for (int i = 0; i < var_num; ++i) {
       elem_type[i] = req->insts(0).tensor_array(i).elem_type();
-      VLOG(3) << "var[" << i << "] has elem type: " << elem_type[i];
+      VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
       if (elem_type[i] == 0) {  // int64
         elem_size[i] = sizeof(int64_t);
         lod_tensor.dtype = paddle::PaddleDType::INT64;
@@ -70,17 +70,17 @@ int GeneralModelOp::inference() {
       if (req->insts(0).tensor_array(i).shape(0) == -1) {
         lod_tensor.lod.resize(1);
         lod_tensor.lod[0].push_back(0);
-        VLOG(3) << "var[" << i << "] is lod_tensor";
+        VLOG(2) << "var[" << i << "] is lod_tensor";
       } else {
         lod_tensor.shape.push_back(batch_size);
         capacity[i] = 1;
         for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
           int dim = req->insts(0).tensor_array(i).shape(k);
-          VLOG(3) << "shape for var[" << i << "]: " << dim;
+          VLOG(2) << "shape for var[" << i << "]: " << dim;
           capacity[i] *= dim;
           lod_tensor.shape.push_back(dim);
         }
-        VLOG(3) << "var[" << i << "] is tensor, capacity: " << capacity[i];
+        VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
       }
       if (i == 0) {
         lod_tensor.name = "words";
@@ -95,19 +95,19 @@ int GeneralModelOp::inference() {
       for (int j = 0; j < batch_size; ++j) {
         const Tensor &tensor = req->insts(j).tensor_array(i);
         int data_len = tensor.data_size();
-        VLOG(3) << "tensor size for var[" << i << "]: " << tensor.data_size();
+        VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size();
         int cur_len = in->at(i).lod[0].back();
-        VLOG(3) << "current len: " << cur_len;
+        VLOG(2) << "current len: " << cur_len;
         in->at(i).lod[0].push_back(cur_len + data_len);
-        VLOG(3) << "new len: " << cur_len + data_len;
+        VLOG(2) << "new len: " << cur_len + data_len;
       }
       in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]);
       in->at(i).shape = {in->at(i).lod[0].back(), 1};
-      VLOG(3) << "var[" << i
+      VLOG(2) << "var[" << i
               << "] is lod_tensor and len=" << in->at(i).lod[0].back();
     } else {
       in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
-      VLOG(3) << "var[" << i
+      VLOG(2) << "var[" << i
               << "] is tensor and capacity=" << batch_size * capacity[i];
     }
   }
@@ -144,7 +144,7 @@ int GeneralModelOp::inference() {
     }
   }
-  VLOG(3) << "going to infer";
+  VLOG(2) << "going to infer";
   TensorVector *out = butil::get_object<TensorVector>();
   if (!out) {
     LOG(ERROR) << "Failed get tls output object";
@@ -157,7 +157,7 @@ int GeneralModelOp::inference() {
   for (uint32_t i = 0; i < 10; i++) {
     oss << *(example + i) << " ";
   }
-  VLOG(3) << "msg: " << oss.str();
+  VLOG(2) << "msg: " << oss.str();
   // infer
   if (predictor::InferManager::instance().infer(
@@ -167,7 +167,7 @@ int GeneralModelOp::inference() {
   }
   // print response
   float *example_1 = reinterpret_cast<float *>((*out)[0].data.data());
-  VLOG(3) << "result: " << *example_1;
+  VLOG(2) << "result: " << *example_1;
   Response *res = mutable_data<Response>();
......
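The only change in this operator is the VLOG level, but the surrounding lod bookkeeping is worth a gloss: for a variable-length input, lod[0] stores cumulative offsets, so lod[0].back() is the total flattened length. The same idea in a few lines of Python (illustration only, not Serving code):

# Offset-style lod encoding for a variable-length batch, mirroring the
# C++ loop above: lod[0] = [0, len0, len0+len1, ...].
batch = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]  # three variable-length instances
lod = [0]
flat = []
for inst in batch:
    lod.append(lod[-1] + len(inst))  # cumulative offsets, like lod[0].push_back
    flat.extend(inst)
# The flattened tensor has shape [total_len, 1], as in in->at(i).shape above.
print(lod)        # [0, 3, 5, 9]
print(len(flat))  # 9 == lod[-1]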
@@ -33,16 +33,22 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 int conf_check(const Request *req,
                const std::shared_ptr<PaddleGeneralModelConfig> &model_config) {
   int var_num = req->insts(0).tensor_array_size();
+  VLOG(2) << "var num: " << var_num;
   if (var_num != model_config->_feed_type.size()) {
     LOG(ERROR) << "feed var number not match.";
     return -1;
   }
+  VLOG(2) << "begin to checkout feed type";
   for (int i = 0; i < var_num; ++i) {
+    VLOG(2) << "feed type[" << i << "]: " <<
+        model_config->_feed_type[i];
     if (model_config->_feed_type[i] !=
         req->insts(0).tensor_array(i).elem_type()) {
       LOG(ERROR) << "feed type not match.";
       return -1;
     }
+    VLOG(2) << "feed shape size: " << model_config->_feed_shape[i].size();
     if (model_config->_feed_shape[i].size() ==
         req->insts(0).tensor_array(i).shape_size()) {
       for (int j = 0; j < model_config->_feed_shape[i].size(); ++j) {
@@ -84,7 +90,7 @@ int GeneralReaderOp::inference() {
   }
   int var_num = req->insts(0).tensor_array_size();
-  VLOG(3) << "var num: " << var_num;
+  VLOG(2) << "var num: " << var_num;
   // read config
   LOG(INFO) << "start to call load general model_conf op";
@@ -112,7 +118,7 @@ int GeneralReaderOp::inference() {
   paddle::PaddleTensor lod_tensor;
   for (int i = 0; i < var_num; ++i) {
     elem_type[i] = req->insts(0).tensor_array(i).elem_type();
-    VLOG(3) << "var[" << i << "] has elem type: " << elem_type[i];
+    VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
     if (elem_type[i] == 0) {  // int64
       elem_size[i] = sizeof(int64_t);
       lod_tensor.dtype = paddle::PaddleDType::INT64;
@@ -124,17 +130,17 @@ int GeneralReaderOp::inference() {
     if (req->insts(0).tensor_array(i).shape(0) == -1) {
       lod_tensor.lod.resize(1);
       lod_tensor.lod[0].push_back(0);
-      VLOG(3) << "var[" << i << "] is lod_tensor";
+      VLOG(2) << "var[" << i << "] is lod_tensor";
     } else {
       lod_tensor.shape.push_back(batch_size);
       capacity[i] = 1;
       for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
         int dim = req->insts(0).tensor_array(i).shape(k);
-        VLOG(3) << "shape for var[" << i << "]: " << dim;
+        VLOG(2) << "shape for var[" << i << "]: " << dim;
         capacity[i] *= dim;
         lod_tensor.shape.push_back(dim);
       }
-      VLOG(3) << "var[" << i << "] is tensor, capacity: " << capacity[i];
+      VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
     }
     if (i == 0) {
       lod_tensor.name = "words";
@@ -149,19 +155,19 @@ int GeneralReaderOp::inference() {
     for (int j = 0; j < batch_size; ++j) {
       const Tensor &tensor = req->insts(j).tensor_array(i);
       int data_len = tensor.data_size();
-      VLOG(3) << "tensor size for var[" << i << "]: " << tensor.data_size();
+      VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size();
       int cur_len = in->at(i).lod[0].back();
-      VLOG(3) << "current len: " << cur_len;
+      VLOG(2) << "current len: " << cur_len;
       in->at(i).lod[0].push_back(cur_len + data_len);
-      VLOG(3) << "new len: " << cur_len + data_len;
+      VLOG(2) << "new len: " << cur_len + data_len;
     }
     in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]);
     in->at(i).shape = {in->at(i).lod[0].back(), 1};
-    VLOG(3) << "var[" << i
+    VLOG(2) << "var[" << i
             << "] is lod_tensor and len=" << in->at(i).lod[0].back();
   } else {
     in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
-    VLOG(3) << "var[" << i
+    VLOG(2) << "var[" << i
             << "] is tensor and capacity=" << batch_size * capacity[i];
   }
 }
@@ -198,14 +204,14 @@ int GeneralReaderOp::inference() {
     }
   }
-  VLOG(3) << "read data from client success";
+  VLOG(2) << "read data from client success";
   // print request
   std::ostringstream oss;
   int64_t *example = reinterpret_cast<int64_t *>((*in)[0].data.data());
   for (int i = 0; i < 10; i++) {
     oss << *(example + i) << " ";
   }
-  VLOG(3) << "head element of first feed var : " << oss.str();
+  VLOG(2) << "head element of first feed var : " << oss.str();
   //
   return 0;
 }
......
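conf_check validates each request against the loaded GeneralModelConfig. The same check could be done client-side before sending; a hedged Python mirror (the feed_batch layout of (type_id, shape) pairs is an assumption for illustration):

def conf_check(feed_batch, model_conf):
    # Mirror of the C++ conf_check above: compare each feed var's type id
    # and declared shape against the GeneralModelConfig proto (sketch).
    for i, var in enumerate(model_conf.feed_var):
        elem_type, shape = feed_batch[i]  # assumed (type_id, shape) pairs
        if elem_type != var.feed_type:
            return -1  # "feed type not match."
        if len(shape) == len(var.shape) and list(shape) != list(var.shape):
            return -1  # "feed shape not match."
    return 0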
 if (CLIENT_ONLY)
-  file(GLOB_RECURSE SERVING_CLIENT_PY_FILES serving_client/*.py)
+  file(GLOB_RECURSE SERVING_CLIENT_PY_FILES paddle_serving_client/*.py)
   set(PY_FILES ${SERVING_CLIENT_PY_FILES})
   SET(PACKAGE_NAME "serving_client")
   set(SETUP_LOG_FILE "setup.py.client.log")
 endif()
 if (NOT CLIENT_ONLY)
-  file(GLOB_RECURSE SERVING_SERVER_PY_FILES serving_server/*.py)
+  file(GLOB_RECURSE SERVING_SERVER_PY_FILES paddle_serving_server/*.py)
   set(PY_FILES ${SERVING_SERVER_PY_FILES})
   SET(PACKAGE_NAME "serving_server")
   set(SETUP_LOG_FILE "setup.py.server.log")
@@ -29,8 +29,8 @@ message("python env: " ${py_env})
 if (CLIENT_ONLY)
   add_custom_command(
     OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
-    COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving/ ${PADDLE_SERVING_BINARY_DIR}/python/
-    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/serving_client/
+    COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_client/ ${PADDLE_SERVING_BINARY_DIR}/python/
+    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_SERVING_BINARY_DIR}/core/general-client/serving_client.so ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_CLIENT_CORE} sdk_configure_py_proto ${PY_FILES})
   add_custom_target(paddle_python ALL DEPENDS serving_client ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
@@ -39,7 +39,7 @@ endif()
 if (NOT CLIENT_ONLY)
   add_custom_command(
     OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
-    COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving/ ${PADDLE_SERVING_BINARY_DIR}/python/
+    COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server/ ${PADDLE_SERVING_BINARY_DIR}/python/
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
   add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
......
@@ -13,7 +13,9 @@
 # limitations under the License.
 from .serving_client import PredictorClient
-from ..proto import sdk_configure_pb2 as sdk
+from .proto import sdk_configure_pb2 as sdk
+from .proto import general_model_config_pb2 as m_config
+import google.protobuf.text_format
 import time
 
 int_type = 0
@@ -74,34 +76,25 @@ class Client(object):
         self.feed_names_to_idx_ = {}
 
     def load_client_config(self, path):
+        model_conf = m_config.GeneralModelConfig()
+        f = open(path, 'r')
+        model_conf = google.protobuf.text_format.Merge(
+            str(f.read()), model_conf)
         # load configuraion here
         # get feed vars, fetch vars
         # get feed shapes, feed types
         # map feed names to index
         self.client_handle_ = PredictorClient()
         self.client_handle_.init(path)
-        self.feed_names_ = []
-        self.fetch_names_ = []
-        self.feed_shapes_ = []
+        self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
+        self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
+        self.feed_shapes_ = [var.shape for var in model_conf.feed_var]
         self.feed_types_ = {}
         self.feed_names_to_idx_ = {}
-        with open(path) as fin:
-            group = fin.readline().strip().split()
-            feed_num = int(group[0])
-            fetch_num = int(group[1])
-            for i in range(feed_num):
-                group = fin.readline().strip().split()
-                self.feed_names_.append(group[0])
-                tmp_shape = []
-                for s in group[2:-1]:
-                    tmp_shape.append(int(s))
-                self.feed_shapes_.append(tmp_shape)
-                self.feed_types_[group[0]] = int(group[-1])
-                self.feed_names_to_idx_[group[0]] = i
-            for i in range(fetch_num):
-                group = fin.readline().strip().split()
-                self.fetch_names_.append(group[0])
+        for i, var in enumerate(model_conf.feed_var):
+            self.feed_names_to_idx_[var.alias_name] = i
+            self.feed_types_[var.alias_name] = var.feed_type
         return
 
     def connect(self, endpoints):
......
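A hedged usage sketch of the refactored client against a running server (endpoint and file names are placeholders; the predict call is elided in this diff, so only the calls visible above are shown):

from paddle_serving_client import Client  # assumed top-level export

client = Client()
client.load_client_config("serving_client_conf.prototxt")  # file written by save_model below
client.connect(["127.0.0.1:9292"])  # connect(endpoints) as defined above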
@@ -12,13 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from paddle.fluid import Executor
 from paddle.fluid.compiler import CompiledProgram
-from paddle.fluid.framework import Program
+from paddle.fluid.framework import core
 from paddle.fluid.framework import default_main_program
+from paddle.fluid.framework import Program
 from paddle.fluid import CPUPlace
 from paddle.fluid.io import save_persistables
+from ..proto import general_model_config_pb2 as model_conf
 import os
 
 def save_model(server_model_folder,
@@ -40,25 +41,42 @@ def save_model(server_model_folder,
     save_persistables(executor, server_model_folder,
                       main_program)
 
+    config = model_conf.GeneralModelConfig()
+
+    for key in feed_var_dict:
+        feed_var = model_conf.FeedVar()
+        feed_var.alias_name = key
+        feed_var.name = feed_var_dict[key].name
+        feed_var.is_lod_tensor = feed_var_dict[key].lod_level == 1
+        if feed_var_dict[key].dtype == core.VarDesc.VarType.INT32 or \
+           feed_var_dict[key].dtype == core.VarDesc.VarType.INT64:
+            feed_var.feed_type = 0
+        if feed_var_dict[key].dtype == core.VarDesc.VarType.FP32:
+            feed_var.feed_type = 1
+        if feed_var.is_lod_tensor:
+            feed_var.shape.extend([-1])
+        else:
+            tmp_shape = []
+            for v in feed_var_dict[key].shape:
+                if v >= 0:
+                    tmp_shape.append(v)
+            feed_var.shape.extend(tmp_shape)
+        config.feed_var.extend([feed_var])
+
+    for key in fetch_var_dict:
+        fetch_var = model_conf.FetchVar()
+        fetch_var.alias_name = key
+        fetch_var.name = fetch_var_dict[key].name
+        fetch_var.shape.extend(fetch_var_dict[key].shape)
+        config.fetch_var.extend([fetch_var])
+
     cmd = "mkdir -p {}".format(client_config_folder)
     os.system(cmd)
-    with open("{}/client.conf".format(client_config_folder), "w") as fout:
-        fout.write("{} {}\n".format(len(feed_var_dict), len(fetch_var_dict)))
-        for key in feed_var_dict:
-            fout.write("{}".format(key))
-            if feed_var_dict[key].lod_level == 1:
-                fout.write(" 1 -1\n")
-            elif feed_var_dict[key].lod_level == 0:
-                fout.write(" {}".format(len(feed_var_dict[key].shape)))
-                for dim in feed_var_dict[key].shape:
-                    fout.write(" {}".format(dim))
-                fout.write("\n")
-        for key in fetch_var_dict:
-            fout.write("{} {}\n".format(key, fetch_var_dict[key].name))
-    cmd = "cp {}/client.conf {}/server.conf".format(
-        client_config_folder, server_model_folder)
-    os.system(cmd)
+    with open("{}/serving_client_conf.prototxt".format(client_config_folder), "w") as fout:
+        fout.write(str(config))
+    with open("{}/serving_server_conf.prototxt".format(server_model_folder), "w") as fout:
+        fout.write(str(config))
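A hedged sketch of exporting a trained fluid program with the new save_model (the parameter list after the two folders is inferred from the body above: dicts mapping alias names to fluid Variables, then the program):

import paddle.fluid as fluid
from paddle_serving_client.io import save_model  # package path from the setup.py in this commit

# x and prediction are fluid Variables from a trained network (placeholders here)
save_model("serving_server_model", "serving_client_conf",
           {"words": x}, {"prediction": prediction},
           fluid.default_main_program())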
@@ -13,7 +13,9 @@
 # limitations under the License.
 import os
-from ..proto import server_configure_pb2 as server_sdk
+from .proto import server_configure_pb2 as server_sdk
+from .proto import general_model_config_pb2 as m_config
+import google.protobuf.text_format
 
 class OpMaker(object):
     def __init__(self):
@@ -58,10 +60,13 @@ class Server(object):
         self.model_toolkit_conf = None
         self.resource_conf = None
         self.engine = None
+        self.memory_optimization = False
+        self.model_conf = None
         self.workflow_fn = "workflow.prototxt"
         self.resource_fn = "resource.prototxt"
         self.infer_service_fn = "infer_service.prototxt"
         self.model_toolkit_fn = "model_toolkit.prototxt"
+        self.general_model_config_fn = "general_model.prototxt"
         self.workdir = ""
         self.max_concurrency = 0
         self.num_threads = 0
@@ -83,11 +88,17 @@ class Server(object):
     def set_op_sequence(self, op_seq):
         self.workflow_conf = op_seq
 
+    def set_memory_optimize(self, flag=False):
+        self.memory_optimization = flag
+
     def _prepare_engine(self, model_config_path, device):
         if self.model_toolkit_conf == None:
             self.model_toolkit_conf = server_sdk.ModelToolkitConf()
         if self.engine == None:
             self.engine = server_sdk.EngineDesc()
+        self.model_config_path = model_config_path
         self.engine.name = "general_model"
         self.engine.reloadable_meta = model_config_path + "/fluid_time_file"
         self.engine.reloadable_type = "timestamp_ne"
@@ -95,13 +106,15 @@ class Server(object):
         self.engine.batch_infer_size = 0
         self.engine.enable_batch_align = 0
         self.engine.model_data_path = model_config_path
-        self.engine.enable_memory_optimization = True
+        self.engine.enable_memory_optimization = self.memory_optimization
         self.engine.static_optimization = False
         self.engine.force_update_static_cache = False
         if device == "cpu":
             self.engine.type = "FLUID_CPU_ANALYSIS_DIR"
         elif device == "gpu":
             self.engine.type = "FLUID_GPU_ANALYSIS_DIR"
         self.model_toolkit_conf.engines.extend([self.engine])
 
     def _prepare_infer_service(self, port):
@@ -115,18 +128,26 @@ class Server(object):
     def _prepare_resource(self, workdir):
         if self.resource_conf == None:
+            with open("{}/{}".format(workdir, self.general_model_config_fn), "w") as fout:
+                fout.write(str(self.model_conf))
             self.resource_conf = server_sdk.ResourceConf()
             self.resource_conf.model_toolkit_path = workdir
             self.resource_conf.model_toolkit_file = self.model_toolkit_fn
+            self.resource_conf.general_model_path = workdir
+            self.resource_conf.general_model_file = self.general_model_config_fn
 
     def _write_pb_str(self, filepath, pb_obj):
         with open(filepath, "w") as fout:
             fout.write(str(pb_obj))
 
     def load_model_config(self, path):
-        self.config_file = "{}/inference.conf".format(path)
         self.model_config_path = path
+        self.model_conf = m_config.GeneralModelConfig()
+        f = open("{}/serving_server_conf.prototxt".format(path), 'r')
+        self.model_conf = google.protobuf.text_format.Merge(
+            str(f.read()), self.model_conf)
         # check config here
+        # print config here
 
     def prepare_server(self, workdir=None, port=9292, device="cpu"):
         if workdir == None:
@@ -154,7 +175,7 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
-        command = "/home/users/dongdaxiang/github_develop/Serving/examples/demo-serving/serving" \
+        command = "/home/users/dongdaxiang/github_develop/Serving/build_server/core/general-server" \
                   " -enable_model_toolkit " \
                   "-inferservice_path {} " \
                   "-inferservice_file {} " \
......
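End to end, the server-side API added in this commit composes as follows (hedged sketch; workflow_conf is the WorkflowConf built in the earlier sketch, and the paths are placeholders):

from paddle_serving_server import Server  # assumed top-level export

server = Server()
server.set_op_sequence(workflow_conf)    # see the workflow sketch above
server.set_memory_optimize(True)         # feeds engine.enable_memory_optimization
server.load_model_config("serving_server_model")  # reads serving_server_conf.prototxt
server.prepare_server(workdir="workdir", port=9292, device="cpu")
server.run_server()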
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
@@ -11,4 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .io import save_model
+""" Paddle Serving Client version string """
+serving_client_version = "0.1.0"
+serving_server_version = "0.1.0"
+module_proto_version = "0.1.0"
@@ -21,7 +21,7 @@ import platform
 from setuptools import setup, Distribution, Extension
 from setuptools import find_packages
 from setuptools import setup
-from paddle_serving.version import serving_client_version
+from paddle_serving_client.version import serving_client_version
 
 def python_version():
     return [int(v) for v in platform.python_version().split(".")]
@@ -32,17 +32,16 @@ REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle'
 ]
 
-packages=['paddle_serving',
-          'paddle_serving.serving_client',
-          'paddle_serving.proto',
-          'paddle_serving.io']
-package_data={'paddle_serving.serving_client': ['serving_client.so']}
-package_dir={'paddle_serving.serving_client':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/serving_client',
-             'paddle_serving.proto':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/proto',
-             'paddle_serving.io':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/io'}
+packages=['paddle_serving_client',
+          'paddle_serving_client.proto',
+          'paddle_serving_client.io']
+package_data={'paddle_serving_client': ['serving_client.so']}
+package_dir={'paddle_serving_client':
+             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client',
+             'paddle_serving_client.proto':
+             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/proto',
+             'paddle_serving_client.io':
+             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/io'}
 
 setup(
     name='paddle-serving-client',
......
@@ -21,8 +21,7 @@ import platform
 from setuptools import setup, Distribution, Extension
 from setuptools import find_packages
 from setuptools import setup
-from paddle_serving.version import serving_client_version
-from paddle_serving.version import serving_server_version
+from paddle_serving_server.version import serving_server_version
 
 def python_version():
     return [int(v) for v in platform.python_version().split(".")]
@@ -33,17 +32,13 @@ REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle'
 ]
 
-packages=['paddle_serving',
-          'paddle_serving.serving_server',
-          'paddle_serving.proto',
-          'paddle_serving.io']
-package_data={'paddle_serving.serving_server': ['serving_server.so']}
-package_dir={'paddle_serving.serving_server':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/serving_server',
-             'paddle_serving.proto':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/proto',
-             'paddle_serving.io':
-             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving/io'}
+packages=['paddle_serving_server',
+          'paddle_serving_server.proto']
+
+package_dir={'paddle_serving_server':
+             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server',
+             'paddle_serving_server.proto':
+             '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto'}
 
 setup(
     name='paddle-serving-server',
@@ -55,7 +50,6 @@ setup(
     author_email='guru4elephant@gmail.com',
     install_requires=REQUIRED_PACKAGES,
     packages=packages,
-    package_data=package_data,
     package_dir=package_dir,
     # PyPI package information.
     classifiers=[
......