diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index c399a80d59c58292f8914c32a3faadb76139477f..0f6931781ed94fabdc0c67cca3e0e3bc977687a7 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -231,7 +231,7 @@ class Server(object):
         # At present, Serving needs to configure the model path in
         # the resource.prototxt file to determine the input and output
         # format of the workflow. To ensure that the input and output
-        # of multiple models are the same
+        # of multiple models are the same.
         workflow_oi_config_path = None
         if isinstance(model_config_paths, str):
             # If there is only one model path, use the default infer_op.
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index f1e83ea41288bbb309226cf75a402a69da2e9d4f..57461b068644583a5e48ec56d8ef81d729367602 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -266,7 +266,7 @@ class Server(object):
         # At present, Serving needs to configure the model path in
         # the resource.prototxt file to determine the input and output
         # format of the workflow. To ensure that the input and output
-        # of multiple models are the same
+        # of multiple models are the same.
         workflow_oi_config_path = None
         if isinstance(model_config_paths, str):
             # If there is only one model path, use the default infer_op.