Commit 8a48acd6 authored by Dong Daxiang, committed by GitHub

Merge pull request #212 from MRXLT/general-server

bug fix 
......@@ -51,6 +51,7 @@ option(WITH_MKL "Compile Paddle Serving with MKL support." OFF)
option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" OFF)
option(CLIENT_ONLY "Compile client libraries and demos only" OFF)
option(WITH_ELASTIC_CTR "Compile ELASITC-CTR solution" OFF)
option(PACK "Compile for whl" OFF)
set(WITH_MKLML ${WITH_MKL})
if (NOT DEFINED WITH_MKLDNN)
......
......@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <sys/time.h>
#include "core/util/include/timer.h"
#include <sys/time.h>
namespace baidu {
namespace paddle_serving {
......@@ -56,7 +56,7 @@ double Timer::ElapsedSec() { return _elapsed / 1000000.0; }
int64_t Timer::TimeStampUS() {
  gettimeofday(&_now, NULL);
  return _now.tv_usec;
  return _now.tv_sec * 1000 * 1000L + _now.tv_usec;
}
int64_t Timer::Tickus() {
......
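The `TimeStampUS()` change above is the actual bug fix: `gettimeofday()` stores only the sub-second remainder (0–999999) in `tv_usec`, so returning it alone never produced a usable timestamp. The corrected line scales the whole seconds to microseconds and adds the remainder. A rough Python analogue of the same arithmetic, purely for illustration (Paddle Serving itself uses the C++ above):

```python
import time

def timestamp_us():
    # Whole microseconds since the epoch, i.e. the Python equivalent of
    # tv_sec * 1000 * 1000L + tv_usec from the fixed C++ code.
    return time.time_ns() // 1_000

print(timestamp_us())
```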
......@@ -109,9 +109,10 @@ class Server(object):
    def set_memory_optimize(self, flag=False):
        self.memory_optimization = flag
    def set_local_bin(self, path):
        self.use_local_bin = True
        self.bin_path = path
    def check_local_bin(self):
        if "SERVING_BIN" in os.environ:
            self.use_local_bin = True
            self.bin_path = os.environ["SERVING_BIN"]
    def _prepare_engine(self, model_config_path, device):
        if self.model_toolkit_conf == None:
......@@ -258,10 +259,11 @@ class Server(object):
    def run_server(self):
        # just run server with system command
        # currently we do not load cube
        self.check_local_bin()
        if not self.use_local_bin:
            self.download_bin()
        else:
            print("Use local bin")
            print("Use local bin : {}".format(self.bin_path))
        command = "{} " \
                  "-enable_model_toolkit " \
                  "-inferservice_path {} " \
......
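Taken together, the new `check_local_bin()` call in `run_server()` lets a locally built `serving` binary be used by exporting `SERVING_BIN`; otherwise `download_bin()` fetches a prebuilt binary as before. A minimal usage sketch, with an assumed import path and a hypothetical binary path (neither appears in this diff):

```python
import os

# If SERVING_BIN is set, check_local_bin() flips use_local_bin to True and
# run_server() skips download_bin(), printing the chosen path instead.
os.environ["SERVING_BIN"] = "/home/work/Serving/build/serving"  # hypothetical path

from paddle_serving_server import Server  # assumed module name

server = Server()
# ... configure model config, port, etc. as usual, then:
# server.run_server()  # would print: Use local bin : /home/work/Serving/build/serving
```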
......@@ -109,9 +109,10 @@ class Server(object):
    def set_memory_optimize(self, flag=False):
        self.memory_optimization = flag
    def set_local_bin(self, path):
        self.use_local_bin = True
        self.bin_path = path
    def check_local_bin(self):
        if "SERVING_BIN" in os.environ:
            self.use_local_bin = True
            self.bin_path = os.environ["SERVING_BIN"]
    def set_gpuid(self, gpuid=0):
        self.gpuid = gpuid
......@@ -243,8 +244,11 @@ class Server(object):
    def run_server(self):
        # just run server with system command
        # currently we do not load cube
        self.check_local_bin()
        if not self.use_local_bin:
            self.download_bin()
        else:
            print("Use local bin : {}".format(self.bin_path))
        command = "{} " \
                  "-enable_model_toolkit " \
                  "-inferservice_path {} " \
......
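The GPU server gains the same switch; `set_local_bin()` is the programmatic alternative to the environment variable, and `set_gpuid()` picks the card. A sketch under the same caveats (assumed import path, hypothetical binary path):

```python
from paddle_serving_server_gpu import Server  # assumed module name

server = Server()
server.set_gpuid(0)  # serve on GPU card 0
# Same effect as exporting SERVING_BIN: use_local_bin becomes True, so
# run_server() will not call download_bin().
server.set_local_bin("/home/work/Serving/build/serving")  # hypothetical path
# server.run_server()  # would print: Use local bin : /home/work/Serving/build/serving
```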
......@@ -35,7 +35,8 @@ def copy_lib():
        os.popen('cp {} ./paddle_serving_client/lib'.format(text.strip().split(' ')[1]))
max_version, mid_version, min_version = python_version()
copy_lib()
if '${PACK}' == 'ON':
    copy_lib()
REQUIRED_PACKAGES = [
    'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle'
......@@ -91,4 +92,3 @@ setup(
    ],
    license='Apache 2.0',
    keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
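The `'${PACK}'` literal in setup.py is presumably a CMake placeholder: when the project is configured, the new `PACK` option from CMakeLists.txt is substituted into the generated setup.py, so the client libraries are copied into the wheel only for a `PACK=ON` build. A small sketch of that substitution step (the template/configure mechanism is an assumption, not shown in this diff):

```python
# Illustrative only: mimic what the configure step would do to the placeholder.
template = "if '${PACK}' == 'ON':\n    copy_lib()"

for value in ("ON", "OFF"):
    generated = template.replace("${PACK}", value)
    # PACK=ON  -> "if 'ON' == 'ON':"  and copy_lib() runs
    # PACK=OFF -> "if 'OFF' == 'ON':" and copy_lib() is skipped
    print(generated)
```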