PaddlePaddle / PaddleHub
Commit 0b01810f (unverified)
Authored by jm_12138 on Nov 04, 2022 · committed by GitHub on Nov 04, 2022
Parent: ca09b195

update falsr_a (#1987)

* update falsr_a
* add clean func
* update falsr
Showing 5 changed files with 143 additions and 73 deletions (+143 −73).
* modules/image/Image_editing/super_resolution/falsr_a/README.md (+14 −15)
* modules/image/Image_editing/super_resolution/falsr_a/README_en.md (+14 −15)
* modules/image/Image_editing/super_resolution/falsr_a/data_feed.py (+1 −1)
* modules/image/Image_editing/super_resolution/falsr_a/module.py (+28 −42)
* modules/image/Image_editing/super_resolution/falsr_a/test.py (new, +86 −0)
modules/image/Image_editing/super_resolution/falsr_a/README.md

````diff
@@ -68,12 +68,11 @@
 - ### 3、API

 - ```python
-   def reconstruct(self,
-                   images=None,
-                   paths=None,
-                   use_gpu=False,
-                   visualization=False,
-                   output_dir="falsr_a_output")
+   def reconstruct(images=None,
+                   paths=None,
+                   use_gpu=False,
+                   visualization=False,
+                   output_dir="falsr_a_output")
   ```

 - Prediction API, used for image super-resolution.
@@ -93,21 +92,14 @@
   * data (numpy.ndarray): the image after super-resolution.

 - ```python
-   def save_inference_model(self,
-                            dirname='falsr_a_save_model',
-                            model_filename=None,
-                            params_filename=None,
-                            combined=False)
+   def save_inference_model(dirname)
   ```

 - Save the model to the specified path.

 - **Parameters**
-   * dirname: name of the directory that holds the model
-   * model\_filename: model file name, defaults to \_\_model\_\_
-   * params\_filename: parameter file name, defaults to \_\_params\_\_ (only takes effect when `combined` is True)
-   * combined: whether to save the parameters into a single unified file
+   * dirname: model save path
@@ -167,3 +159,10 @@
   Initial release

+* 1.1.0
+
+  Remove fluid API
+
+  ```shell
+  $ hub install falsr_a==1.1.0
+  ```
\ No newline at end of file
````
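The updated signatures drop the `self` receiver and the fluid-era export options. A minimal usage sketch of the 1.1.0 interface documented above ("test.jpg" is a placeholder input path, and the module is assumed to be installed via `hub install falsr_a==1.1.0`):

```python
# Minimal sketch of the 1.1.0 API documented in this README.
# "test.jpg" is a placeholder input path, not a file shipped with the repo.
import cv2
import paddlehub as hub

module = hub.Module(name="falsr_a")

# Super-resolve one image on CPU; visualization=True also writes the result
# into the default output directory "falsr_a_output".
results = module.reconstruct(images=[cv2.imread("test.jpg")], use_gpu=False, visualization=True)
print(results[0]["data"].shape)  # numpy.ndarray holding the super-resolved image

# Export the inference model; the new signature only takes the save-path prefix,
# producing model.pdmodel / model.pdiparams under ./inference/.
module.save_inference_model("./inference/model")
```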
modules/image/Image_editing/super_resolution/falsr_a/README_en.md

````diff
@@ -71,12 +71,11 @@
 - ### 3、API

 - ```python
-   def reconstruct(self,
-                   images=None,
-                   paths=None,
-                   use_gpu=False,
-                   visualization=False,
-                   output_dir="falsr_a_output")
+   def reconstruct(images=None,
+                   paths=None,
+                   use_gpu=False,
+                   visualization=False,
+                   output_dir="falsr_a_output")
   ```

 - Prediction API.
@@ -95,21 +94,14 @@
   * data (numpy.ndarray): result of super resolution.

 - ```python
-   def save_inference_model(self,
-                            dirname='falsr_a_save_model',
-                            model_filename=None,
-                            params_filename=None,
-                            combined=False)
+   def save_inference_model(dirname)
   ```

 - Save the model to the specified path.

 - **Parameters**
-   * dirname: Save path.
-   * model\_filename: model file name, defalt is \_\_model\_\_
-   * params\_filename: parameter file name, defalt is \_\_params\_\_ (Only takes effect when `combined` is True)
-   * combined: Whether to save the parameters to a unified file.
+   * dirname: Model save path.
@@ -169,5 +161,12 @@
   First release

+- 1.1.0
+
+  Remove Fluid API
+
+  ```shell
+  $ hub install falsr_a==1.1.0
+  ```
````
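Both READMEs describe the same Python API; the module also keeps a `@serving` entry point (`serving_method` in module.py below). A hedged sketch of calling it through PaddleHub Serving, assuming the standard `hub serving start -m falsr_a` deployment on the default port 8866 and the usual `{"images": [<base64>...]}` request layout (neither is spelled out in this diff):

```python
# Sketch of a PaddleHub Serving request to falsr_a; the port, route and response
# layout are assumptions based on the usual PaddleHub serving convention.
import base64
import json

import cv2
import requests


def cv2_to_base64(image):
    # Encode an OpenCV image as a base64 JPEG string, mirroring the module helper.
    data = cv2.imencode(".jpg", image)[1]
    return base64.b64encode(data.tobytes()).decode("utf8")


org_im = cv2.imread("test.jpg")  # placeholder input image
payload = {"images": [cv2_to_base64(org_im)]}
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:8866/predict/falsr_a"

r = requests.post(url=url, headers=headers, data=json.dumps(payload))
print(r.json()["results"])  # expected to carry the base64-encoded result image(s)
```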
modules/image/Image_editing/super_resolution/falsr_a/data_feed.py

```diff
@@ -5,7 +5,7 @@ from collections import OrderedDict
 import cv2
 import numpy as np
 from PIL import Image

 __all__ = ['reader']
```
modules/image/Image_editing/super_resolution/falsr_a/module.py

```diff
@@ -18,13 +18,14 @@ import os
 import argparse

 import numpy as np
-import paddle.fluid as fluid
-import paddlehub as hub
-from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
+import paddle
+import paddle.jit
+import paddle.static
+from paddle.inference import Config, create_predictor
 from paddlehub.module.module import moduleinfo, runnable, serving

-from falsr_a.data_feed import reader
-from falsr_a.processor import postprocess, base64_to_cv2, cv2_to_base64, check_dir
+from .data_feed import reader
+from .processor import postprocess, base64_to_cv2, cv2_to_base64, check_dir


 @moduleinfo(
@@ -33,21 +34,22 @@ from falsr_a.processor import postprocess, base64_to_cv2, cv2_to_base64, check_dir
     author="paddlepaddle",
     author_email="",
     summary="falsr_a is a super resolution model.",
-    version="1.0.0")
-class Falsr_A(hub.Module):
-    def _initialize(self):
-        self.default_pretrained_model_path = os.path.join(self.directory, "falsr_a_model")
+    version="1.1.0")
+class Falsr_A:
+    def __init__(self):
+        self.default_pretrained_model_path = os.path.join(self.directory, "falsr_a_model", "model")
         self._set_config()

     def _set_config(self):
         """
         predictor config setting
         """
-        self.model_file_path = self.default_pretrained_model_path
-        cpu_config = AnalysisConfig(self.model_file_path)
+        model = self.default_pretrained_model_path + '.pdmodel'
+        params = self.default_pretrained_model_path + '.pdiparams'
+        cpu_config = Config(model, params)
         cpu_config.disable_glog_info()
         cpu_config.disable_gpu()
-        self.cpu_predictor = create_paddle_predictor(cpu_config)
+        self.cpu_predictor = create_predictor(cpu_config)

         try:
             _places = os.environ["CUDA_VISIBLE_DEVICES"]
@@ -56,10 +58,10 @@ class Falsr_A(hub.Module):
         except:
             use_gpu = False

         if use_gpu:
-            gpu_config = AnalysisConfig(self.model_file_path)
+            gpu_config = Config(model, params)
             gpu_config.disable_glog_info()
             gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
-            self.gpu_predictor = create_paddle_predictor(gpu_config)
+            self.gpu_predictor = create_predictor(gpu_config)

     def reconstruct(self, images=None, paths=None, use_gpu=False, visualization=False, output_dir="falsr_a_output"):
         """
@@ -96,11 +98,18 @@ class Falsr_A(hub.Module):
         for i in range(total_num):
             image_y = np.array([all_data[i]['img_y']])
             image_scale_pbpr = np.array([all_data[i]['img_scale_pbpr']])
-            image_y = PaddleTensor(image_y.copy())
-            image_scale_pbpr = PaddleTensor(image_scale_pbpr.copy())
-            output = self.gpu_predictor.run([image_y, image_scale_pbpr]) if use_gpu else self.cpu_predictor.run(
-                [image_y, image_scale_pbpr])
-            output = np.expand_dims(output[0].as_ndarray(), axis=1)
+            predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
+            input_names = predictor.get_input_names()
+            input_handle = predictor.get_input_handle(input_names[0])
+            input_handle.copy_from_cpu(image_y.copy())
+            input_handle = predictor.get_input_handle(input_names[1])
+            input_handle.copy_from_cpu(image_scale_pbpr.copy())
+            predictor.run()
+            output_names = predictor.get_output_names()
+            output_handle = predictor.get_output_handle(output_names[0])
+            output = np.expand_dims(output_handle.copy_to_cpu(), axis=1)
             out = postprocess(data_out=output, org_im=all_data[i]['org_im'],
@@ -111,29 +120,6 @@ class Falsr_A(hub.Module):
             res.append(out)
         return res

-    def save_inference_model(self,
-                             dirname='falsr_a_save_model',
-                             model_filename=None,
-                             params_filename=None,
-                             combined=False):
-        if combined:
-            model_filename = "__model__" if not model_filename else model_filename
-            params_filename = "__params__" if not params_filename else params_filename
-
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
-
-        program, feeded_var_names, target_vars = fluid.io.load_inference_model(
-            dirname=self.default_pretrained_model_path, executor=exe)
-
-        fluid.io.save_inference_model(dirname=dirname,
-                                      main_program=program,
-                                      executor=exe,
-                                      feeded_var_names=feeded_var_names,
-                                      target_vars=target_vars,
-                                      model_filename=model_filename,
-                                      params_filename=params_filename)
-
     @serving
     def serving_method(self, images, **kwargs):
         """
```
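The module.py changes above replace the legacy `paddle.fluid.core` entry points (`PaddleTensor`, `AnalysisConfig`, `create_paddle_predictor`) with the `paddle.inference` API. A standalone sketch of that new flow, with a placeholder model prefix and input shapes (the real falsr_a input shapes are not shown in this diff):

```python
# Standalone sketch of the paddle.inference flow the module now uses.
# The model prefix and the two input shapes are placeholders for illustration;
# running it requires a real exported model (e.g. from save_inference_model).
import numpy as np
from paddle.inference import Config, create_predictor

prefix = "falsr_a_model/model"  # assumed <prefix>.pdmodel / <prefix>.pdiparams pair
config = Config(prefix + ".pdmodel", prefix + ".pdiparams")
config.disable_glog_info()
config.disable_gpu()  # or config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
predictor = create_predictor(config)

# Inputs are fed through named handles instead of being wrapped in PaddleTensor.
image_y = np.random.rand(1, 256, 256, 1).astype("float32")           # placeholder tensor
image_scale_pbpr = np.random.rand(1, 512, 512, 2).astype("float32")  # placeholder tensor
input_names = predictor.get_input_names()
predictor.get_input_handle(input_names[0]).copy_from_cpu(image_y)
predictor.get_input_handle(input_names[1]).copy_from_cpu(image_scale_pbpr)

predictor.run()

# Outputs come back as plain numpy arrays via copy_to_cpu().
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
output = output_handle.copy_to_cpu()
```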
modules/image/Image_editing/super_resolution/falsr_a/test.py (new file, mode 100644)

```python
import os
import shutil
import unittest

import cv2
import requests
import numpy as np
import paddlehub as hub

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


class TestHubModule(unittest.TestCase):

    @classmethod
    def setUpClass(cls) -> None:
        img_url = 'https://unsplash.com/photos/1sLIu1XKQrY/download?ixid=MnwxMjA3fDB8MXxhbGx8MTJ8fHx8fHwyfHwxNjYyMzQxNDUx&force=true&w=640'
        if not os.path.exists('tests'):
            os.makedirs('tests')
        response = requests.get(img_url)
        assert response.status_code == 200, 'Network Error.'
        with open('tests/test.jpg', 'wb') as f:
            f.write(response.content)
        cls.module = hub.Module(name="falsr_a")

    @classmethod
    def tearDownClass(cls) -> None:
        shutil.rmtree('tests')
        shutil.rmtree('inference')
        shutil.rmtree('falsr_a_output')

    def test_reconstruct1(self):
        results = self.module.reconstruct(paths=['tests/test.jpg'], use_gpu=False, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct2(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct3(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=True)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct4(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=True, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct5(self):
        self.assertRaises(AssertionError, self.module.reconstruct, paths=['no.jpg'])

    def test_reconstruct6(self):
        self.assertRaises(AttributeError, self.module.reconstruct, images=['test.jpg'])

    def test_save_inference_model(self):
        self.module.save_inference_model('./inference/model')
        self.assertTrue(os.path.exists('./inference/model.pdmodel'))
        self.assertTrue(os.path.exists('./inference/model.pdiparams'))


if __name__ == "__main__":
    unittest.main()
```
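The new tests download a sample image from Unsplash in `setUpClass`, so they need network access, and `test_reconstruct4` additionally needs a CUDA device. A small runner sketch, assuming the repository root as the working directory:

```python
# Run the new test file in a subprocess; the file path comes from this commit,
# while the working-directory assumption (repository root) is for illustration only.
import subprocess
import sys

subprocess.run(
    [sys.executable, "modules/image/Image_editing/super_resolution/falsr_a/test.py"],
    check=True,
)
```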