@@ -47,3 +47,8 @@ serving_client (str, optional) - The path of configuration files for client. Def
model_filename (str, optional) - The name of file to load the inference program. If it is None, the default filename `__model__` will be used. Default: None.
params_filename (str, optional) - The name of file to load all parameters. It is only used for the case that all parameters were saved in a single binary file. If parameters were saved in separate files, set it as None. Default: None.
Or you can use a built-in Python module called `paddle_serving_client.io.convert` to convert it.
help='Path of saved model files. Program file and parameter files are saved in this directory.'
)
# Output directory for the server-side model files and configuration files.
parser.add_argument(
    "--serving_server",
    default="serving_server",
    type=str,
    help='The path of model files and configuration files for server. Default: "serving_server".',
)
# Output directory for the client-side configuration files.
parser.add_argument(
    "--serving_client",
    default="serving_client",
    type=str,
    help='The path of configuration files for client. Default: "serving_client".',
)
# Name of the inference-program file inside the model directory.
# Fix: help text was missing the terminating period and the
# "Default: None." sentence that every sibling option (and the
# module docstring) carries — keep the CLI help consistent.
parser.add_argument(
    "--model_filename",
    type=str,
    default=None,
    help='The name of file to load the inference program. If it is None, the default filename __model__ will be used. Default: None.'
)
parser.add_argument(
"--params_filename",
type=str,
default=None,
help='The name of file to load all parameters. It is only used for the case that all parameters were saved in a single binary file. If parameters were saved in separate files, set it as None. Default: None.'