From 9a1ee0c6f624a778735a6c0f5a9c1676a199919d Mon Sep 17 00:00:00 2001
From: wangna11BD
Date: Tue, 13 Jul 2021 03:08:30 +0000
Subject: [PATCH] add docs

---
 applications/tools/lapstyle.py    |  2 +-
 docs/en_US/tutorials/lap_style.md | 24 ++++++++++++++++++------
 docs/zh_CN/tutorials/lap_style.md | 24 ++++++++++++++++++------
 ppgan/apps/lapstyle_predictor.py  |  7 ++++++-
 4 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/applications/tools/lapstyle.py b/applications/tools/lapstyle.py
index 725b572..e4f0fcc 100644
--- a/applications/tools/lapstyle.py
+++ b/applications/tools/lapstyle.py
@@ -18,7 +18,7 @@ if __name__ == "__main__":
     parser.add_argument("--weight_path",
                         type=str,
                         default=None,
-                        help="path to model checkpoint path")
+                        help="path to the model weight file")
 
     parser.add_argument(
         "--style",
diff --git a/docs/en_US/tutorials/lap_style.md b/docs/en_US/tutorials/lap_style.md
index 9f52dda..48bc99b 100644
--- a/docs/en_US/tutorials/lap_style.md
+++ b/docs/en_US/tutorials/lap_style.md
@@ -13,14 +13,26 @@ Artistic style transfer aims at migrating the style from an example image to a c
 
 ![lapstyle_overview](https://user-images.githubusercontent.com/79366697/118654987-b24dc100-b81b-11eb-9430-d84630f80511.png)
 
-## 2 How to use
+## 2 Quick experience
+```
+python applications/tools/lapstyle.py --content_img ${PATH_OF_CONTENT_IMG}
+```
+### Parameters
+
+- `--content_img (str)`: path to the content image.
+- `--output_path (str)`: path to the output image directory, default value: `output_dir`.
+- `--weight_path (str)`: path to the model weight file; if `weight_path` is `None`, the pre-trained model will be downloaded automatically, default value: `None`.
+- `--style (str)`: style of the output image; if `weight_path` is `None`, `style` can be chosen from `starrynew`, `circuit`, `ocean` and `stars`, default value: `starrynew`.
+- `--style_image_path (str)`: path to the style image; it must be specified when `weight_path` is not `None`, default value: `None`.
+
+## 3 How to use
 
-### 2.1 Prepare Datasets
+### 3.1 Prepare Datasets
 
 To train LapStyle, we use the COCO dataset as content set. And you can choose any style image you like. Before training or testing, remember modify the data path of style image in the config file.
 
-### 2.2 Train
+### 3.2 Train
 
 Datasets used in example is COCO, you can also change it to your own dataset in the config file.
 
@@ -40,14 +52,14 @@ python -u tools/main.py --config-file configs/lapstyle_rev_first.yaml --load ${P
 python -u tools/main.py --config-file configs/lapstyle_rev_second.yaml --load ${PATH_OF_LAST_STAGE_WEIGHT}
 ```
-### 2.4 Test
+### 3.4 Test
 To test the trained model, you can directly test the "lapstyle_rev_second", since it also contains the trained weight of previous stages:
 ```
 python tools/main.py --config-file configs/lapstyle_rev_second.yaml --evaluate-only --load ${PATH_OF_WEIGHT}
 ```
-## 3 Results
+## 4 Results
 | Style | Stylized Results |
 | --- | --- |
 | ![stars](https://user-images.githubusercontent.com/79366697/118655423-20928380-b81c-11eb-92bd-0deeb320ff14.png) | ![chicago_stylized_stars_512](https://user-images.githubusercontent.com/79366697/118655638-50da2200-b81c-11eb-9223-58d5df022fa5.png)|
 | ![circuit](https://user-images.githubusercontent.com/79366697/118655399-196b7580-b81c-11eb-8bc5-d5ece80c18ba.jpg) | ![chicago_stylized_circuit](https://user-images.githubusercontent.com/79366697/118655660-56376c80-b81c-11eb-87f2-64ae5a82375c.png)|
-## 4 Pre-trained models
+## 5 Pre-trained models
 We also provide several trained models.
diff --git a/docs/zh_CN/tutorials/lap_style.md b/docs/zh_CN/tutorials/lap_style.md
index 339391c..9aff005 100644
--- a/docs/zh_CN/tutorials/lap_style.md
+++ b/docs/zh_CN/tutorials/lap_style.md
@@ -12,13 +12,25 @@ LapStyle首先通过绘图网络(Drafting Network)传输低分辨率的全
 
 ![lapstyle_overview](https://user-images.githubusercontent.com/79366697/118654987-b24dc100-b81b-11eb-9430-d84630f80511.png)
 
-## 2 如何使用
+## 2 快速体验
+```
+python applications/tools/lapstyle.py --content_img ${PATH_OF_CONTENT_IMG}
+```
+### **参数**
+
+- `--content_img (str)`: 输入的内容图像路径。
+- `--output_path (str)`: 输出的图像路径,默认为`output_dir`。
+- `--weight_path (str)`: 模型权重路径,设置`None`时会自行下载预训练模型,默认为`None`。
+- `--style (str)`: 生成图像风格,当`weight_path`为`None`时,可以在`starrynew`, `circuit`, `ocean` 和 `stars`中选择,默认为`starrynew`。
+- `--style_image_path (str)`: 输入的风格图像路径,当`weight_path`不为`None`时需要输入,默认为`None`。
+
+## 3 如何使用
 
-### 2.1 数据准备
+### 3.1 数据准备
 
 为了训练LapStyle,我们使用COCO数据集作为内容数据集。您可以任意选择您喜欢的风格图片。在开始训练与测试之前,记得修改配置文件的数据路径。
 
-### 2.2 训练
+### 3.2 训练
 
 示例以COCO数据为例。如果您想使用自己的数据集,可以在配置文件中修改数据集为您自己的数据集。
 
@@ -37,14 +49,14 @@ python -u tools/main.py --config-file configs/lapstyle_rev_first.yaml --load ${P
 python -u tools/main.py --config-file configs/lapstyle_rev_second.yaml --load ${PATH_OF_LAST_STAGE_WEIGHT}
 ```
-### 2.4 测试
+### 3.4 测试
 测试训练好的模型,您可以直接测试 "lapstyle_rev_second",因为它包含了之前步骤里的训练权重:
 ```
 python tools/main.py --config-file configs/lapstyle_rev_second.yaml --evaluate-only --load ${PATH_OF_WEIGHT}
 ```
-## 3 结果展示
+## 4 结果展示
 | Style | Stylized Results |
 | --- | --- |
 | ![stars](https://user-images.githubusercontent.com/79366697/118655423-20928380-b81c-11eb-92bd-0deeb320ff14.png) | ![chicago_stylized_stars_512](https://user-images.githubusercontent.com/79366697/118655638-50da2200-b81c-11eb-9223-58d5df022fa5.png)|
 | ![circuit](https://user-images.githubusercontent.com/79366697/118655399-196b7580-b81c-11eb-8bc5-d5ece80c18ba.jpg) | ![chicago_stylized_circuit](https://user-images.githubusercontent.com/79366697/118655660-56376c80-b81c-11eb-87f2-64ae5a82375c.png)|
-## 4 模型下载
+## 5 模型下载
 我们提供几个训练好的权重。
diff --git a/ppgan/apps/lapstyle_predictor.py b/ppgan/apps/lapstyle_predictor.py
index ec3d134..5ea8cd4 100644
--- a/ppgan/apps/lapstyle_predictor.py
+++ b/ppgan/apps/lapstyle_predictor.py
@@ -147,7 +147,10 @@ class LapStylePredictor(BasePredictor):
             else:
                 raise Exception(f'has not implemented {style}.')
         else:
-            self.style_image_path = style_image_path
+            if style_image_path is None:
+                raise Exception('style_image_path cannot be None.')
+            else:
+                self.style_image_path = style_image_path
         self.net_enc.set_dict(paddle.load(weight_path)['net_enc'])
         self.net_enc.eval()
         self.net_dec.set_dict(paddle.load(weight_path)['net_dec'])
         self.net_dec.eval()
@@ -196,4 +199,6 @@ class LapStylePredictor(BasePredictor):
 
         stylized_visual = cv.cvtColor(stylized_visual, cv.COLOR_RGB2BGR)
         cv.imwrite(os.path.join(self.output, 'stylized.png'), stylized_visual)
+        print('Model LapStyle output images path:', self.output)
+
         return stylized
-- 
GitLab
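Note: the options documented in this patch can be combined for a run that uses a custom-trained checkpoint; a minimal sketch is given below. The `${...}` placeholders follow the notation already used in the tutorial and stand in for real file paths (`${PATH_OF_STYLE_IMG}` is an illustrative name, not one defined by the docs). Consistent with the check added in `ppgan/apps/lapstyle_predictor.py`, `--style_image_path` must be provided whenever `--weight_path` is set; otherwise the predictor raises an exception.

```
# Sketch: stylize a content image with a user-trained weight.
# Replace the ${...} placeholders with real file paths before running.
python applications/tools/lapstyle.py \
    --content_img ${PATH_OF_CONTENT_IMG} \
    --output_path output_dir \
    --weight_path ${PATH_OF_WEIGHT} \
    --style_image_path ${PATH_OF_STYLE_IMG}
```

When `--weight_path` is left at its default of `None`, only `--style` needs to be chosen (one of `starrynew`, `circuit`, `ocean`, `stars`) and the matching pre-trained model is downloaded automatically.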