diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2b6006c59fa4561f72210ba0e82f6ee9e6141982..cf67e6127fabfe998bedefda174fa38e2e358e39 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -51,6 +51,7 @@ option(WITH_MKL "Compile Paddle Serving with MKL support." OFF)
 option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU" OFF)
 option(CLIENT_ONLY "Compile client libraries and demos only" OFF)
 option(WITH_ELASTIC_CTR "Compile ELASITC-CTR solution" OFF)
+option(PACK "Compile for whl" OFF)
 
 set(WITH_MKLML ${WITH_MKL})
 if (NOT DEFINED WITH_MKLDNN)
diff --git a/core/util/src/timer.cc b/core/util/src/timer.cc
index 836a100231f97e639c60caa2508f0f85887cd155..df9d4e91a6550ceb1de760e2421062541c0c34b8 100644
--- a/core/util/src/timer.cc
+++ b/core/util/src/timer.cc
@@ -12,8 +12,8 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <sys/time.h>
 #include "core/util/include/timer.h"
+#include <sys/time.h>
 
 namespace baidu {
 namespace paddle_serving {
@@ -56,7 +56,7 @@ double Timer::ElapsedSec() { return _elapsed / 1000000.0; }
 
 int64_t Timer::TimeStampUS() {
   gettimeofday(&_now, NULL);
-  return _now.tv_usec;
+  return _now.tv_sec * 1000 * 1000L + _now.tv_usec;
 }
 
 int64_t Timer::Tickus() {
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 6f85bdc54b426d4cbb596b97f5461c2b8e76edcd..70251cff90715241411105901bba1cd0581d7673 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -109,9 +109,10 @@ class Server(object):
     def set_memory_optimize(self, flag=False):
         self.memory_optimization = flag
 
-    def set_local_bin(self, path):
-        self.use_local_bin = True
-        self.bin_path = path
+    def check_local_bin(self):
+        if "SERVING_BIN" in os.environ:
+            self.use_local_bin = True
+            self.bin_path = os.environ["SERVING_BIN"]
 
     def _prepare_engine(self, model_config_path, device):
         if self.model_toolkit_conf == None:
@@ -258,10 +259,11 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
+        self.check_local_bin()
         if not self.use_local_bin:
             self.download_bin()
         else:
-            print("Use local bin")
+            print("Use local bin : {}".format(self.bin_path))
         command = "{} " \
                   "-enable_model_toolkit " \
                   "-inferservice_path {} " \
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index 1cb860c71447d9672dee8ec4cd48e7fcb62ca5f5..4372884a290bb98faf4416c2cb0e872bc4c76003 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -109,9 +109,10 @@ class Server(object):
     def set_memory_optimize(self, flag=False):
         self.memory_optimization = flag
 
-    def set_local_bin(self, path):
-        self.use_local_bin = True
-        self.bin_path = path
+    def check_local_bin(self):
+        if "SERVING_BIN" in os.environ:
+            self.use_local_bin = True
+            self.bin_path = os.environ["SERVING_BIN"]
 
     def set_gpuid(self, gpuid=0):
         self.gpuid = gpuid
@@ -243,8 +244,11 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
+        self.check_local_bin()
         if not self.use_local_bin:
             self.download_bin()
+        else:
+            print("Use local bin : {}".format(self.bin_path))
         command = "{} " \
                   "-enable_model_toolkit " \
                   "-inferservice_path {} " \
diff --git a/python/setup.py.client.in b/python/setup.py.client.in
index ce4d65546c83ad3c374af124ee767fbe42ef9429..20b611b1b59a6d1e4e15d8e998da685b6ced262d 100644
--- a/python/setup.py.client.in
+++ b/python/setup.py.client.in
@@ -35,7 +35,8 @@ def copy_lib():
     os.popen('cp {} ./paddle_serving_client/lib'.format(text.strip().split(' ')[1]))
 
 max_version, mid_version, min_version = python_version()
-copy_lib()
+if '${PACK}' == 'ON':
+    copy_lib()
 
 REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle'
@@ -91,4 +92,3 @@ setup(
     ],
     license='Apache 2.0',
     keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
-