diff --git a/docs/paddlex_gui/download.md b/docs/paddlex_gui/download.md
index 77bb9962b37498ec3279a51cdc1faa34da1f498b..bf5d2ceaeadfc14612d2d83498796108469ae166 100644
--- a/docs/paddlex_gui/download.md
+++ b/docs/paddlex_gui/download.md
@@ -25,4 +25,3 @@
 * **Disk space**: an SSD with more than 1 TB of free space is recommended (not required)
 
 ***Note: On Windows and Mac OS, PaddleX only supports single-GPU models. NCCL is not yet supported on Windows.***
-
diff --git a/docs/paddlex_gui/how_to_use.md b/docs/paddlex_gui/how_to_use.md
index 32740c114242ccc2c6b7ecacc3088ba163fe7a3c..db5e9b1f58b3012e1104a7dfe8ff63394ecf3eee 100644
--- a/docs/paddlex_gui/how_to_use.md
+++ b/docs/paddlex_gui/how_to_use.md
@@ -42,7 +42,7 @@ PaddleX GUI is a graphical development client built on PaddleX
 
 Before starting model training, you need to annotate your data in the format required by the task type. PaddleX currently supports four task types: image classification, object detection, semantic segmentation, and instance segmentation. See [data annotation](https://paddlex.readthedocs.io/zh_CN/latest/appendix/datasets.html) for how the data for each task type is prepared.
 
-
+
 
 **Step 2: Import my dataset**
 
@@ -116,26 +116,26 @@ PaddleX GUI is a graphical development client built on PaddleX
 
    PaddleX runs entirely on your local hardware. Deep learning is indeed compute-intensive; so that you can try PaddleX quickly, we also support CPU-only machines, but we strongly recommend using a GPU for faster training and a better development experience.
 
-
+
 
 2. **Can I deploy PaddleX on a server or a cloud platform?**
 
   PaddleX GUI is a client designed for local, single-machine installation and cannot be deployed directly on a server. You can use the PaddleX API directly, or deploy on a server with the core PaddlePaddle framework. If you want to use public compute resources, we strongly recommend trying [EasyDL](https://ai.baidu.com/easydl/) or [AI Studio](https://aistudio.baidu.com/aistudio/index) from the PaddlePaddle product family.
 
-
+
 
 3. **Does PaddleX support data annotated with EasyData?**
 
  Yes. PaddleX can read EasyData-annotated data without issue. However, the current version of PaddleX GUI cannot import the EasyData format directly yet; following the documentation, you can [convert the dataset](https://paddlex.readthedocs.io/zh_CN/latest/appendix/how_to_convert_dataset.html) and then import it into PaddleX GUI for further development.
 Meanwhile, we are actively working on letting PaddleX GUI import the EasyData format directly.
 
-
-
+
+
 
 4. **Why does model pruning analysis take so long?**
 
  Pruning analysis measures the sensitivity of each convolutional layer of the model and prunes each layer by a different ratio according to how much its parameters affect model accuracy. The process is repeated until the FLOPS target is met, and the pruned model is then fine-tuned to obtain the final result, which is why it takes a long time. For the theory behind pruning, see the [pruning algorithm introduction](https://paddlepaddle.github.io/PaddleSlim/algo/algo.html#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
 
-
+
 
 5. **How do I call the backend code?**
 
diff --git a/docs/tutorials/datasets.md b/docs/tutorials/datasets.md
index b197b43b6c1ce2dd8c91bae3c484573365493ba0..8264d06a91ba1125036d4ab44f1fc06fe11d3049 100755
--- a/docs/tutorials/datasets.md
+++ b/docs/tutorials/datasets.md
@@ -224,7 +224,7 @@ labelB
 └--labels.txt # list of labels
 ```
 
-The image file names should correspond one-to-one with the json file names.
+The image file names should correspond one-to-one with the json file names.
 
 Each json file stores the information related to `labels`, as shown below:
 ```
@@ -269,17 +269,17 @@ labelB
 └--labels.txt # list of labels
 ```
 
-The image file names should correspond one-to-one with the json file names.
+The image file names should correspond one-to-one with the json file names.
 
 Each json file stores the information related to `labels`, as shown below:
 ```
-"labels": [{"y1": 18, "x2": 883, "x1": 371, "y2": 404, "name": "labelA",
-            "mask": "kVfc0`0Zg0
 
         assert batch_size > 0, "The batch_size should be greater than 0."
         assert algo in self._support_algo_type, \
             "The algo should be KL, abs_max or min_max."
-
+
         self._executor = executor
         self._dataset = dataset
         self._batch_size = batch_size
@@ -154,20 +154,19 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
         logging.info("Start to run batch!")
         for data in self._data_loader():
             start = time.time()
-            self._executor.run(
-                program=self._program,
-                feed=data,
-                fetch_list=self._fetch_list,
-                return_numpy=False)
+            with fluid.scope_guard(self._scope):
+                self._executor.run(program=self._program,
+                                   feed=data,
+                                   fetch_list=self._fetch_list,
+                                   return_numpy=False)
             if self._algo == "KL":
                 self._sample_data(batch_id)
             else:
                 self._sample_threshold()
             end = time.time()
-            logging.debug('[Run batch data] Batch={}/{}, time_each_batch={} s.'.format(
-                str(batch_id + 1),
-                str(batch_ct),
-                str(end-start)))
+            logging.debug(
+                '[Run batch data] Batch={}/{}, time_each_batch={} s.'.format(
+                    str(batch_id + 1), str(batch_ct), str(end - start)))
             batch_id += 1
             if self._batch_nums and batch_id >= self._batch_nums:
                 break
@@ -194,15 +193,16 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
         Returns:
             None
         '''
-        feed_vars_names = [var.name for var in self._feed_list]
-        fluid.io.save_inference_model(
-            dirname=save_model_path,
-            feeded_var_names=feed_vars_names,
-            target_vars=self._fetch_list,
-            executor=self._executor,
-            params_filename='__params__',
-            main_program=self._program)
-
+        with fluid.scope_guard(self._scope):
+            feed_vars_names = [var.name for var in self._feed_list]
+            fluid.io.save_inference_model(
+                dirname=save_model_path,
+                feeded_var_names=feed_vars_names,
+                target_vars=self._fetch_list,
+                executor=self._executor,
+                params_filename='__params__',
+                main_program=self._program)
+
     def _load_model_data(self):
         '''
         Set data loader.
@@ -212,7 +212,8 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
         self._data_loader = fluid.io.DataLoader.from_generator(
             feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True)
         self._data_loader.set_sample_list_generator(
-            self._dataset.generator(self._batch_size, drop_last=True),
+            self._dataset.generator(
+                self._batch_size, drop_last=True),
             places=self._place)
 
     def _calculate_kl_threshold(self):
@@ -235,10 +236,12 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
                 weight_threshold.append(abs_max_value)
             self._quantized_var_kl_threshold[var_name] = weight_threshold
             end = time.time()
-            logging.debug('[Calculate weight] Weight_id={}/{}, time_each_weight={} s.'.format(
-                str(ct),
-                str(len(self._quantized_weight_var_name)),
-                str(end-start)))
+            logging.debug(
+                '[Calculate weight] Weight_id={}/{}, time_each_weight={} s.'.
+                format(
+                    str(ct),
+                    str(len(self._quantized_weight_var_name)), str(end -
+                                                                   start)))
             ct += 1
 
         ct = 1
@@ -257,10 +260,12 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
                 self._quantized_var_kl_threshold[var_name] = \
                     self._get_kl_scaling_factor(np.abs(sampling_data))
                 end = time.time()
-                logging.debug('[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.format(
-                    str(ct),
-                    str(len(self._quantized_act_var_name)),
-                    str(end-start)))
+                logging.debug(
+                    '[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.
+                    format(
+                        str(ct),
+                        str(len(self._quantized_act_var_name)),
+                        str(end - start)))
                 ct += 1
         else:
             for var_name in self._quantized_act_var_name:
@@ -270,10 +275,10 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
                 self._quantized_var_kl_threshold[var_name] = \
                     self._get_kl_scaling_factor(np.abs(self._sampling_data[var_name]))
                 end = time.time()
-                logging.debug('[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.format(
-                    str(ct),
-                    str(len(self._quantized_act_var_name)),
-                    str(end-start)))
+                logging.debug(
+                    '[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.
+                    format(
+                        str(ct),
+                        str(len(self._quantized_act_var_name)),
+                        str(end - start)))
                 ct += 1
-
-
\ No newline at end of file
diff --git a/paddlex/cv/models/yolo_v3.py b/paddlex/cv/models/yolo_v3.py
index 85ee89fc86851ff9be104d0ee258eefce9843a69..0417431bdda69f109fc0a40f30d0ddac85174e82 100644
--- a/paddlex/cv/models/yolo_v3.py
+++ b/paddlex/cv/models/yolo_v3.py
@@ -1,11 +1,11 @@
 # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-#
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -313,10 +313,12 @@ class YOLOv3(BaseAPI):
             images = np.array([d[0] for d in data])
             im_sizes = np.array([d[1] for d in data])
             feed_data = {'image': images, 'im_size': im_sizes}
-            outputs = self.exe.run(self.test_prog,
-                                   feed=[feed_data],
-                                   fetch_list=list(self.test_outputs.values()),
-                                   return_numpy=False)
+            with fluid.scope_guard(self.scope):
+                outputs = self.exe.run(
+                    self.test_prog,
+                    feed=[feed_data],
+                    fetch_list=list(self.test_outputs.values()),
+                    return_numpy=False)
             res = {
                 'bbox': (np.array(outputs[0]),
                          outputs[0].recursive_sequence_lengths())
@@ -366,12 +368,13 @@ class YOLOv3(BaseAPI):
         im, im_size = self.test_transforms(img_file)
         im = np.expand_dims(im, axis=0)
         im_size = np.expand_dims(im_size, axis=0)
-        outputs = self.exe.run(self.test_prog,
-                               feed={'image': im,
-                                     'im_size': im_size},
-                               fetch_list=list(self.test_outputs.values()),
-                               return_numpy=False,
-                               use_program_cache=True)
+        with fluid.scope_guard(self.scope):
+            outputs = self.exe.run(self.test_prog,
+                                   feed={'image': im,
+                                         'im_size': im_size},
+                                   fetch_list=list(self.test_outputs.values()),
+                                   return_numpy=False,
+                                   use_program_cache=True)
         res = {
             k: (np.array(v), v.recursive_sequence_lengths())
             for k, v in zip(list(self.test_outputs.keys()), outputs)
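
Note on the Python hunks above: the recurring change is to wrap every `Executor.run` and `fluid.io.save_inference_model` call in `with fluid.scope_guard(...)`, so each model's variables are read from and written to its own scope rather than the process-wide default scope. Below is a minimal, self-contained sketch of that pattern with the PaddlePaddle 1.x fluid API; the toy network and the names `x`, `y`, `main_prog` are illustrative assumptions, not code taken from PaddleX.

```python
import numpy as np
import paddle.fluid as fluid

# A private scope for this model; everything run under scope_guard(scope)
# reads and writes variables here rather than in fluid.global_scope().
scope = fluid.Scope()
place = fluid.CPUPlace()
exe = fluid.Executor(place)

# Toy static graph (illustrative only).
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.data(name='x', shape=[None, 4], dtype='float32')
    y = fluid.layers.fc(input=x, size=2)

# Initialize the parameters inside the private scope.
with fluid.scope_guard(scope):
    exe.run(startup_prog)

# Every later run/fetch/save must happen under the same scope guard,
# which is what the diff adds around the executor calls.
feed = {'x': np.random.rand(1, 4).astype('float32')}
with fluid.scope_guard(scope):
    out, = exe.run(main_prog, feed=feed, fetch_list=[y])
print(out.shape)
```

Keeping each model in its own scope this way prevents two models loaded in the same process from overwriting each other's parameters in the global scope, which appears to be the motivation for the change.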
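
Similarly, for the annotation layout shown in the `docs/tutorials/datasets.md` hunks (one json file per image, whose `labels` list carries box coordinates `x1`/`y1`/`x2`/`y2`, a class `name`, and, for instance segmentation, a compressed `mask` string), a minimal reading sketch could look like this; the file name and the way the fields are used are assumptions based only on the keys visible above.

```python
import json

# Load one EasyData-style annotation file (file name assumed for illustration).
with open("0001.json") as f:
    ann = json.load(f)

# Each entry of "labels" describes one object: a bounding box, its class name,
# and optionally an RLE-compressed "mask" for instance segmentation.
for obj in ann["labels"]:
    box = (obj["x1"], obj["y1"], obj["x2"], obj["y2"])
    print(obj["name"], box, "has mask:", "mask" in obj)
```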