diff --git a/modules/image/image_processing/enlightengan/README.md b/modules/image/image_processing/enlightengan/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef46cb940029b07ca2a2d5594c831815962b1be9
--- /dev/null
+++ b/modules/image/image_processing/enlightengan/README.md
@@ -0,0 +1,137 @@
+# enlightengan
+
+|Module Name|enlightengan|
+| :--- | :---: |
+|Category|Image - Low-light image enhancement|
+|Network|EnlightenGAN|
+|Dataset|-|
+|Fine-tuning supported or not|No|
+|Module Size|83MB|
+|Latest update date|2021-11-04|
+|Data indicators|-|
+
+
+## I. Basic Information
+
+- ### Application Effect Display
+  - Sample results:
+
+    Input image
+
+    Output image
+
+- ### Module Introduction
+
+  - EnlightenGAN is trained on unpaired data. By designing a self feature-preserving loss and a self-regularized attention mechanism, the trained network can be applied to low-light enhancement in a wide range of scenes.
+
+  - For more details, please refer to: [EnlightenGAN: Deep Light Enhancement without Paired Supervision](https://arxiv.org/abs/1906.06972)
+
+
+
+## II. Installation
+
+- ### 1. Environment Dependencies
+  - onnxruntime
+  - x2paddle
+  - pillow
+
+- ### 2. Installation
+
+  - ```shell
+    $ hub install enlightengan
+    ```
+  - In case of any problems during installation, please refer to: [Windows Installation](../../../../docs/docs_ch/get_start/windows_quickstart.md)
+    | [Linux Installation](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [MacOS Installation](../../../../docs/docs_ch/get_start/mac_quickstart.md)
+
+## III. Module API Prediction
+
+- ### 1. Command line Prediction
+
+  - ```shell
+    # Read from a file
+    $ hub run enlightengan --input_path "/PATH/TO/IMAGE"
+    ```
+  - This command calls the low-light enhancement module from the command line. For more information, please refer to: [PaddleHub Command Line Instruction](../../../../docs/docs_ch/tutorial/cmd_usage.rst)
+
+- ### 2. Prediction Code Example
+
+ - ```python
+ import paddlehub as hub
+
+ enlightener = hub.Module(name="enlightengan")
+ input_path = ["/PATH/TO/IMAGE"]
+ # Read from a file
+ enlightener.enlightening(paths=input_path, output_dir='./enlightening_result/', use_gpu=True)
+ ```
+
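+  - The call returns the enhanced images as a list of `numpy.ndarray`. A minimal sketch of saving the returned arrays yourself (the module hands them back in RGB order, so flip to BGR before writing with OpenCV):
+
+  - ```python
+    import cv2
+    import paddlehub as hub
+
+    enlightener = hub.Module(name="enlightengan")
+    results = enlightener.enlightening(paths=["/PATH/TO/IMAGE"], visualization=False)
+    for i, out in enumerate(results):
+        # flip RGB back to BGR for cv2.imwrite
+        cv2.imwrite("enlightened_{}.png".format(i), out[:, :, ::-1])
+    ```
+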
+- ### 3. API
+
+  - ```python
+    def enlightening(images=None, paths=None, output_dir='./enlightening_result/', use_gpu=False, visualization=True)
+    ```
+    - Low-light image enhancement API.
+
+    - **Parameters**
+
+      - images (list\[numpy.ndarray\]): image data, ndarray.shape is \[H, W, C\], BGR color space (as read by cv2);
+      - paths (list\[str\]): image paths;
+      - output\_dir (str): directory where the results are saved;
+      - use\_gpu (bool): whether to use GPU;
+      - visualization (bool): whether to save the results to a local folder.
+
+    - **Return**
+
+      - res (list\[numpy.ndarray\]): the enhanced image data.
+
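+  - The `images` argument takes in-memory arrays in BGR channel order, exactly as returned by `cv2.imread`, so an image can be passed without writing it to disk first. A minimal sketch:
+
+  - ```python
+    import cv2
+    import paddlehub as hub
+
+    enlightener = hub.Module(name="enlightengan")
+    img = cv2.imread("/PATH/TO/IMAGE")  # BGR, shape [H, W, C]
+    results = enlightener.enlightening(images=[img], output_dir='./enlightening_result/', visualization=True)
+    ```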
+
+## IV. Server Deployment
+
+- PaddleHub Serving can deploy an online service of low-light image enhancement.
+
+- ### Step 1: Start PaddleHub Serving
+
+  - Run the startup command:
+  - ```shell
+    $ hub serving start -m enlightengan
+    ```
+
+  - This completes the deployment of an online low-light enhancement service API, with the default port number 8866.
+
+  - **NOTE:** If you want to run prediction on GPU, set the CUDA\_VISIBLE\_DEVICES environment variable before starting the service; otherwise it does not need to be set.
+
+- ### Step 2: Send a prediction request
+
+  - With the server configured, the following lines of code send a prediction request and obtain the result:
+
+ - ```python
+ import requests
+ import json
+ import cv2
+ import base64
+
+
+ def cv2_to_base64(image):
+ data = cv2.imencode('.jpg', image)[1]
+        return base64.b64encode(data.tobytes()).decode('utf8')
+
+    # Send an HTTP request
+ data = {'images':[cv2_to_base64(cv2.imread("/PATH/TO/IMAGE"))]}
+ headers = {"Content-type": "application/json"}
+ url = "http://127.0.0.1:8866/predict/enlightengan"
+ r = requests.post(url=url, headers=headers, data=json.dumps(data))
+
+    # Print the prediction results
+ print(r.json()["results"])
+ ```
+
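+  - The `results` field holds the enhanced images as nested lists, one per input image. A minimal sketch of turning the first result back into an image file, continuing from the request above (the arrays follow the module's internal RGB order):
+
+  - ```python
+    import numpy as np
+
+    out = np.array(r.json()["results"][0], dtype=np.uint8)
+    # flip RGB back to BGR for cv2.imwrite
+    cv2.imwrite("enlightened.png", out[:, :, ::-1])
+    ```
+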
+## V. Release Note
+
+* 1.0.0
+
+  First release
+
+ - ```shell
+ $ hub install enlightengan==1.0.0
+ ```
diff --git a/modules/image/image_processing/enlightengan/enlighten_inference/pd_model/x2paddle_code.py b/modules/image/image_processing/enlightengan/enlighten_inference/pd_model/x2paddle_code.py
new file mode 100755
index 0000000000000000000000000000000000000000..d211efac274f7d6be42ee8a765726526d9e51888
--- /dev/null
+++ b/modules/image/image_processing/enlightengan/enlighten_inference/pd_model/x2paddle_code.py
@@ -0,0 +1,201 @@
+import paddle
+import math
+
+
+class ONNXModel(paddle.nn.Layer):
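+    """Generator network of EnlightenGAN, converted from ONNX with x2paddle.
+
+    The forward pass first applies a per-channel 1x1 convolution (conv0) to the
+    input, derives an attention map as 1 minus the gray illumination of that
+    result, downsamples the map to every decoder scale and multiplies it onto the
+    U-Net skip connections, and finally adds the attended residual back to the
+    conv0 output. Both the sum and the attended residual are returned.
+    """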
+ def __init__(self):
+ super(ONNXModel, self).__init__()
+ self.conv0 = paddle.nn.Conv2D(in_channels=3, out_channels=3, kernel_size=[1, 1], groups=3)
+ self.pool0 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.pool1 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.conv1 = paddle.nn.Conv2D(in_channels=4, out_channels=32, kernel_size=[3, 3], padding=1)
+ self.pool2 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.leakyrelu0 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.pool3 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.batchnorm0 = paddle.nn.BatchNorm(
+ num_channels=32, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv2 = paddle.nn.Conv2D(in_channels=32, out_channels=32, kernel_size=[3, 3], padding=1)
+ self.leakyrelu1 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm1 = paddle.nn.BatchNorm(
+ num_channels=32, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.pool4 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.conv3 = paddle.nn.Conv2D(in_channels=32, out_channels=64, kernel_size=[3, 3], padding=1)
+ self.leakyrelu2 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm2 = paddle.nn.BatchNorm(
+ num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv4 = paddle.nn.Conv2D(in_channels=64, out_channels=64, kernel_size=[3, 3], padding=1)
+ self.leakyrelu3 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm3 = paddle.nn.BatchNorm(
+ num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.pool5 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.conv5 = paddle.nn.Conv2D(in_channels=64, out_channels=128, kernel_size=[3, 3], padding=1)
+ self.leakyrelu4 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm4 = paddle.nn.BatchNorm(
+ num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv6 = paddle.nn.Conv2D(in_channels=128, out_channels=128, kernel_size=[3, 3], padding=1)
+ self.leakyrelu5 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm5 = paddle.nn.BatchNorm(
+ num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.pool6 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.conv7 = paddle.nn.Conv2D(in_channels=128, out_channels=256, kernel_size=[3, 3], padding=1)
+ self.leakyrelu6 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm6 = paddle.nn.BatchNorm(
+ num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv8 = paddle.nn.Conv2D(in_channels=256, out_channels=256, kernel_size=[3, 3], padding=1)
+ self.leakyrelu7 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm7 = paddle.nn.BatchNorm(
+ num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.pool7 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
+ self.conv9 = paddle.nn.Conv2D(in_channels=256, out_channels=512, kernel_size=[3, 3], padding=1)
+ self.leakyrelu8 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm8 = paddle.nn.BatchNorm(
+ num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv10 = paddle.nn.Conv2D(in_channels=512, out_channels=512, kernel_size=[3, 3], padding=1)
+ self.leakyrelu9 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm9 = paddle.nn.BatchNorm(
+ num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv11 = paddle.nn.Conv2D(in_channels=512, out_channels=256, kernel_size=[3, 3], padding=1)
+ self.conv12 = paddle.nn.Conv2D(in_channels=512, out_channels=256, kernel_size=[3, 3], padding=1)
+ self.leakyrelu10 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm10 = paddle.nn.BatchNorm(
+ num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv13 = paddle.nn.Conv2D(in_channels=256, out_channels=256, kernel_size=[3, 3], padding=1)
+ self.leakyrelu11 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm11 = paddle.nn.BatchNorm(
+ num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv14 = paddle.nn.Conv2D(in_channels=256, out_channels=128, kernel_size=[3, 3], padding=1)
+ self.conv15 = paddle.nn.Conv2D(in_channels=256, out_channels=128, kernel_size=[3, 3], padding=1)
+ self.leakyrelu12 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm12 = paddle.nn.BatchNorm(
+ num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv16 = paddle.nn.Conv2D(in_channels=128, out_channels=128, kernel_size=[3, 3], padding=1)
+ self.leakyrelu13 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm13 = paddle.nn.BatchNorm(
+ num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv17 = paddle.nn.Conv2D(in_channels=128, out_channels=64, kernel_size=[3, 3], padding=1)
+ self.conv18 = paddle.nn.Conv2D(in_channels=128, out_channels=64, kernel_size=[3, 3], padding=1)
+ self.leakyrelu14 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm14 = paddle.nn.BatchNorm(
+ num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv19 = paddle.nn.Conv2D(in_channels=64, out_channels=64, kernel_size=[3, 3], padding=1)
+ self.leakyrelu15 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm15 = paddle.nn.BatchNorm(
+ num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv20 = paddle.nn.Conv2D(in_channels=64, out_channels=32, kernel_size=[3, 3], padding=1)
+ self.conv21 = paddle.nn.Conv2D(in_channels=64, out_channels=32, kernel_size=[3, 3], padding=1)
+ self.leakyrelu16 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.batchnorm16 = paddle.nn.BatchNorm(
+ num_channels=32, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
+ self.conv22 = paddle.nn.Conv2D(in_channels=32, out_channels=32, kernel_size=[3, 3], padding=1)
+ self.leakyrelu17 = paddle.nn.LeakyReLU(negative_slope=0.20000000298023224)
+ self.conv23 = paddle.nn.Conv2D(in_channels=32, out_channels=3, kernel_size=[1, 1])
+
+ def forward(self, x2paddle_input):
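+        # Scalar constants for the attention map: gray illumination uses the
+        # BT.601 weights (0.299, 0.587, 0.114) on (conv0_output + 1) / 2 and the
+        # map is 1 - gray; the remaining 1.0 scales the conv0 output added at the end.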
+ x2paddle_137 = paddle.full(dtype='float32', shape=[1], fill_value=1.0)
+ x2paddle_145 = paddle.full(dtype='float32', shape=[1], fill_value=0.29899999499320984)
+ x2paddle_147 = paddle.full(dtype='float32', shape=[1], fill_value=0.5870000123977661)
+ x2paddle_150 = paddle.full(dtype='float32', shape=[1], fill_value=0.11400000005960464)
+ x2paddle_153 = paddle.full(dtype='float32', shape=[1], fill_value=2.0)
+ x2paddle_155 = paddle.full(dtype='float32', shape=[1], fill_value=1.0)
+ x2paddle_256 = paddle.full(dtype='float32', shape=[1], fill_value=1.0)
+ x2paddle_134 = self.conv0(x2paddle_input)
+ x2paddle_135, = paddle.split(x=x2paddle_134, num_or_sections=[1])
+ x2paddle_257 = paddle.multiply(x=x2paddle_134, y=x2paddle_256)
+ x2paddle_136 = paddle.squeeze(x=x2paddle_135, axis=[0])
+ x2paddle_138 = paddle.add(x=x2paddle_136, y=x2paddle_137)
+ x2paddle_139_p0, x2paddle_139_p1, x2paddle_139_p2 = paddle.split(x=x2paddle_138, num_or_sections=[1, 1, 1])
+ x2paddle_142 = paddle.squeeze(x=x2paddle_139_p0, axis=[0])
+ x2paddle_143 = paddle.squeeze(x=x2paddle_139_p1, axis=[0])
+ x2paddle_144 = paddle.squeeze(x=x2paddle_139_p2, axis=[0])
+ x2paddle_146 = paddle.multiply(x=x2paddle_142, y=x2paddle_145)
+ x2paddle_148 = paddle.multiply(x=x2paddle_143, y=x2paddle_147)
+ x2paddle_151 = paddle.multiply(x=x2paddle_144, y=x2paddle_150)
+ x2paddle_149 = paddle.add(x=x2paddle_146, y=x2paddle_148)
+ x2paddle_152 = paddle.add(x=x2paddle_149, y=x2paddle_151)
+ x2paddle_154 = paddle.divide(x=x2paddle_152, y=x2paddle_153)
+ x2paddle_156 = paddle.subtract(x=x2paddle_155, y=x2paddle_154)
+ x2paddle_157 = paddle.unsqueeze(x=x2paddle_156, axis=[0])
+ x2paddle_158 = paddle.unsqueeze(x=x2paddle_157, axis=[0])
+ x2paddle_159 = self.pool0(x2paddle_158)
+ x2paddle_163 = paddle.concat(x=[x2paddle_134, x2paddle_158], axis=1)
+ x2paddle_160 = self.pool1(x2paddle_159)
+ x2paddle_164 = self.conv1(x2paddle_163)
+ x2paddle_161 = self.pool2(x2paddle_160)
+ x2paddle_165 = self.leakyrelu0(x2paddle_164)
+ x2paddle_162 = self.pool3(x2paddle_161)
+ x2paddle_166 = self.batchnorm0(x2paddle_165)
+ x2paddle_167 = self.conv2(x2paddle_166)
+ x2paddle_168 = self.leakyrelu1(x2paddle_167)
+ x2paddle_169 = self.batchnorm1(x2paddle_168)
+ x2paddle_170 = self.pool4(x2paddle_169)
+ x2paddle_246 = paddle.multiply(x=x2paddle_169, y=x2paddle_158)
+ x2paddle_171 = self.conv3(x2paddle_170)
+ x2paddle_172 = self.leakyrelu2(x2paddle_171)
+ x2paddle_173 = self.batchnorm2(x2paddle_172)
+ x2paddle_174 = self.conv4(x2paddle_173)
+ x2paddle_175 = self.leakyrelu3(x2paddle_174)
+ x2paddle_176 = self.batchnorm3(x2paddle_175)
+ x2paddle_177 = self.pool5(x2paddle_176)
+ x2paddle_232 = paddle.multiply(x=x2paddle_176, y=x2paddle_159)
+ x2paddle_178 = self.conv5(x2paddle_177)
+ x2paddle_179 = self.leakyrelu4(x2paddle_178)
+ x2paddle_180 = self.batchnorm4(x2paddle_179)
+ x2paddle_181 = self.conv6(x2paddle_180)
+ x2paddle_182 = self.leakyrelu5(x2paddle_181)
+ x2paddle_183 = self.batchnorm5(x2paddle_182)
+ x2paddle_184 = self.pool6(x2paddle_183)
+ x2paddle_218 = paddle.multiply(x=x2paddle_183, y=x2paddle_160)
+ x2paddle_185 = self.conv7(x2paddle_184)
+ x2paddle_186 = self.leakyrelu6(x2paddle_185)
+ x2paddle_187 = self.batchnorm6(x2paddle_186)
+ x2paddle_188 = self.conv8(x2paddle_187)
+ x2paddle_189 = self.leakyrelu7(x2paddle_188)
+ x2paddle_190 = self.batchnorm7(x2paddle_189)
+ x2paddle_191 = self.pool7(x2paddle_190)
+ x2paddle_204 = paddle.multiply(x=x2paddle_190, y=x2paddle_161)
+ x2paddle_192 = self.conv9(x2paddle_191)
+ x2paddle_193 = self.leakyrelu8(x2paddle_192)
+ x2paddle_194 = self.batchnorm8(x2paddle_193)
+ x2paddle_195 = paddle.multiply(x=x2paddle_194, y=x2paddle_162)
+ x2paddle_196 = self.conv10(x2paddle_195)
+ x2paddle_197 = self.leakyrelu9(x2paddle_196)
+ x2paddle_198 = self.batchnorm9(x2paddle_197)
+ x2paddle_203 = paddle.nn.functional.interpolate(x=x2paddle_198, scale_factor=[2.0, 2.0], mode='bilinear')
+ x2paddle_205 = self.conv11(x2paddle_203)
+ x2paddle_206 = paddle.concat(x=[x2paddle_205, x2paddle_204], axis=1)
+ x2paddle_207 = self.conv12(x2paddle_206)
+ x2paddle_208 = self.leakyrelu10(x2paddle_207)
+ x2paddle_209 = self.batchnorm10(x2paddle_208)
+ x2paddle_210 = self.conv13(x2paddle_209)
+ x2paddle_211 = self.leakyrelu11(x2paddle_210)
+ x2paddle_212 = self.batchnorm11(x2paddle_211)
+ x2paddle_217 = paddle.nn.functional.interpolate(x=x2paddle_212, scale_factor=[2.0, 2.0], mode='bilinear')
+ x2paddle_219 = self.conv14(x2paddle_217)
+ x2paddle_220 = paddle.concat(x=[x2paddle_219, x2paddle_218], axis=1)
+ x2paddle_221 = self.conv15(x2paddle_220)
+ x2paddle_222 = self.leakyrelu12(x2paddle_221)
+ x2paddle_223 = self.batchnorm12(x2paddle_222)
+ x2paddle_224 = self.conv16(x2paddle_223)
+ x2paddle_225 = self.leakyrelu13(x2paddle_224)
+ x2paddle_226 = self.batchnorm13(x2paddle_225)
+ x2paddle_231 = paddle.nn.functional.interpolate(x=x2paddle_226, scale_factor=[2.0, 2.0], mode='bilinear')
+ x2paddle_233 = self.conv17(x2paddle_231)
+ x2paddle_234 = paddle.concat(x=[x2paddle_233, x2paddle_232], axis=1)
+ x2paddle_235 = self.conv18(x2paddle_234)
+ x2paddle_236 = self.leakyrelu14(x2paddle_235)
+ x2paddle_237 = self.batchnorm14(x2paddle_236)
+ x2paddle_238 = self.conv19(x2paddle_237)
+ x2paddle_239 = self.leakyrelu15(x2paddle_238)
+ x2paddle_240 = self.batchnorm15(x2paddle_239)
+ x2paddle_245 = paddle.nn.functional.interpolate(x=x2paddle_240, scale_factor=[2.0, 2.0], mode='bilinear')
+ x2paddle_247 = self.conv20(x2paddle_245)
+ x2paddle_248 = paddle.concat(x=[x2paddle_247, x2paddle_246], axis=1)
+ x2paddle_249 = self.conv21(x2paddle_248)
+ x2paddle_250 = self.leakyrelu16(x2paddle_249)
+ x2paddle_251 = self.batchnorm16(x2paddle_250)
+ x2paddle_252 = self.conv22(x2paddle_251)
+ x2paddle_253 = self.leakyrelu17(x2paddle_252)
+ x2paddle_254 = self.conv23(x2paddle_253)
+ x2paddle_255 = paddle.multiply(x=x2paddle_254, y=x2paddle_158)
+ x2paddle_output = paddle.add(x=x2paddle_255, y=x2paddle_257)
+ return x2paddle_output, x2paddle_255
diff --git a/modules/image/image_processing/enlightengan/module.py b/modules/image/image_processing/enlightengan/module.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8f441c55c32c364112d3b3121183cb3964596f
--- /dev/null
+++ b/modules/image/image_processing/enlightengan/module.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+
+import cv2
+import numpy as np
+import paddle
+
+import paddlehub as hub
+from .enlighten_inference.pd_model.x2paddle_code import ONNXModel
+from .util import base64_to_cv2
+from paddlehub.module.module import moduleinfo
+from paddlehub.module.module import runnable
+from paddlehub.module.module import serving
+
+
+@moduleinfo(name="enlightengan",
+ type="CV/enlighten",
+ author="paddlepaddle",
+ author_email="",
+ summary="",
+ version="1.0.0")
+class EnlightenGAN:
+
+ def __init__(self):
+ self.pretrained_model = os.path.join(self.directory, "enlighten_inference/pd_model")
+ self.model = ONNXModel()
+ params = paddle.load(os.path.join(self.pretrained_model, 'model.pdparams'))
+ self.model.set_dict(params, use_structured_name=True)
+
+ def enlightening(self,
+ images: list = None,
+ paths: list = None,
+ output_dir: str = './enlightening_result/',
+ use_gpu: bool = False,
+ visualization: bool = True):
+ '''
+ enlighten images in the low-light scene.
+
+ images (list[numpy.ndarray]): data of images, shape of each is [H, W, C], color space must be BGR(read by cv2).
+ paths (list[str]): paths to images
+ output_dir (str): the dir to save the results
+ use_gpu (bool): if True, use gpu to perform the computation, otherwise cpu.
+ visualization (bool): if True, save results in output_dir.
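+
+        Returns:
+            results (list[numpy.ndarray]): the enhanced images, in RGB channel order.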
+ '''
+ results = []
+ paddle.disable_static()
+ place = 'gpu:0' if use_gpu else 'cpu'
+ place = paddle.set_device(place)
+        if images is None and paths is None:
+            print('No image provided. Please input an image or an image path.')
+            return
+ self.model.eval()
+
+        if images is not None:
+            for image in images:
+                # BGR -> RGB, HWC -> CHW, scale to [0, 1], add a batch dimension
+                image = image[:, :, ::-1]
+                image = np.expand_dims(np.transpose(image, (2, 0, 1)).astype(np.float32) / 255., 0)
+ inputtensor = paddle.to_tensor(image)
+ out, out1 = self.model(inputtensor)
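+                # postprocess the first output: map [-1, 1] to [0, 255]; the second output (attended residual) is unused here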
+ out = out.numpy()[0]
+ out = (np.transpose(out, (1, 2, 0)) + 1) / 2.0 * 255.0
+ out = np.clip(out, 0, 255)
+ out = out.astype('uint8')
+ results.append(out)
+
+        if paths is not None:
+            for path in paths:
+                # read as BGR, convert to RGB, HWC -> CHW, scale to [0, 1], add a batch dimension
+                image = cv2.imread(path)[:, :, ::-1]
+                image = np.expand_dims(np.transpose(image, (2, 0, 1)).astype(np.float32) / 255., 0)
+ inputtensor = paddle.to_tensor(image)
+ out, out1 = self.model(inputtensor)
+ out = out.numpy()[0]
+ out = (np.transpose(out, (1, 2, 0)) + 1) / 2.0 * 255.0
+ out = np.clip(out, 0, 255)
+ out = out.astype('uint8')
+ results.append(out)
+
+        if visualization:
+            os.makedirs(output_dir, exist_ok=True)
+            for i, out in enumerate(results):
+                # flip RGB back to BGR before writing with OpenCV
+                cv2.imwrite(os.path.join(output_dir, 'output_{}.png'.format(i)), out[:, :, ::-1])
+
+ return results
+
+ @runnable
+ def run_cmd(self, argvs: list):
+ """
+ Run as a command.
+ """
+ self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
+ prog='hub run {}'.format(self.name),
+ usage='%(prog)s',
+ add_help=True)
+
+ self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
+ self.arg_config_group = self.parser.add_argument_group(
+ title="Config options", description="Run configuration for controlling module behavior, not required.")
+ self.add_module_config_arg()
+ self.add_module_input_arg()
+ self.args = self.parser.parse_args(argvs)
+ results = self.enlightening(paths=[self.args.input_path],
+ output_dir=self.args.output_dir,
+ use_gpu=self.args.use_gpu,
+ visualization=self.args.visualization)
+ return results
+
+ @serving
+ def serving_method(self, images, **kwargs):
+ """
+ Run as a service.
+ """
+ images_decode = [base64_to_cv2(image) for image in images]
+ results = self.enlightening(images=images_decode, **kwargs)
+ tolist = [result.tolist() for result in results]
+ return tolist
+
+ def add_module_config_arg(self):
+ """
+ Add the command config options.
+ """
+ self.arg_config_group.add_argument('--use_gpu', action='store_true', help="use GPU or not")
+
+ self.arg_config_group.add_argument('--output_dir',
+ type=str,
+ default='enlightening_result',
+ help='output directory for saving result.')
+        self.arg_config_group.add_argument('--visualization', action='store_true', help='save results or not.')
+
+ def add_module_input_arg(self):
+ """
+ Add the command input options.
+ """
+ self.arg_input_group.add_argument('--input_path', type=str, help="path to input image.")
diff --git a/modules/image/image_processing/enlightengan/util.py b/modules/image/image_processing/enlightengan/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..531a0ae0d487822a870ba7f09817e658967aff10
--- /dev/null
+++ b/modules/image/image_processing/enlightengan/util.py
@@ -0,0 +1,11 @@
+import base64
+
+import cv2
+import numpy as np
+
+
+def base64_to_cv2(b64str):
+ data = base64.b64decode(b64str.encode('utf8'))
+    data = np.frombuffer(data, np.uint8)
+ data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+ return data