Commit 3b5012be authored by: J jack

Merge branch 'develop' of github.com:PaddlePaddle/PaddleX into develop

@@ -25,4 +25,3 @@
* **Hard disk space**: an SSD with at least 1 TB of free space is recommended (not required)
***Note: PaddleX supports only single-GPU models on Windows and Mac OS. NCCL is not yet supported on Windows.***
@@ -49,7 +49,7 @@ The PaddlePaddle C++ inference library comes in different builds depending on whether the GPU is used, whether TensorRT is supported, and
### Step3: Install and configure OpenCV
-1. Download the 3.4.6 release for Windows from the OpenCV website, [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
+1. Download the 3.4.6 release for Windows from the OpenCV website, [download link](https://bj.bcebos.com/paddleseg/deploy/opencv-3.4.6-vc14_vc15.exe)
2. Run the downloaded executable and extract OpenCV to a directory of your choice, e.g. `D:\projects\opencv`
3. Configure the environment variables as follows
   - My Computer -> Properties -> Advanced system settings -> Environment Variables
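   - For example, append the OpenCV runtime directory to the `Path` variable, e.g. `D:\projects\opencv\build\x64\vc14\bin` (adjust to the directory chosen in step 2; whether the `vc14` or `vc15` sub-folder applies depends on your Visual Studio version)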
......
@@ -106,8 +106,11 @@ class VOCDetection(Dataset):
ct = int(tree.find('id').text)
im_id = np.array([int(tree.find('id').text)])
pattern = re.compile('<object>', re.IGNORECASE)
-obj_tag = pattern.findall(
-    str(ET.tostringlist(tree.getroot())))[0][1:-1]
+obj_match = pattern.findall(
+    str(ET.tostringlist(tree.getroot())))
+if len(obj_match) == 0:
+    continue
+obj_tag = obj_match[0][1:-1]
objs = tree.findall(obj_tag)
pattern = re.compile('<size>', re.IGNORECASE)
size_tag = pattern.findall(
......
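The new `obj_match` check above makes `VOCDetection` skip annotation files that contain no `<object>` entry instead of failing on an empty `findall` result. A minimal standalone sketch of that check, using a hypothetical annotation string rather than a real dataset file:

```python
import re
import xml.etree.ElementTree as ET

# Hypothetical VOC-style annotation that has an <id> but no <object> entries.
xml_str = ("<annotation><id>1</id>"
           "<size><width>100</width><height>80</height></size></annotation>")
tree = ET.ElementTree(ET.fromstring(xml_str))

pattern = re.compile('<object>', re.IGNORECASE)
obj_match = pattern.findall(str(ET.tostringlist(tree.getroot())))
if len(obj_match) == 0:
    # The dataset loop would `continue` here and move on to the next file.
    print("no <object> tag found, sample skipped")
else:
    obj_tag = obj_match[0][1:-1]  # strip the surrounding '<' and '>'
    print("objects stored under tag:", obj_tag)
```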
@@ -73,6 +73,7 @@ class BaseAPI:
self.status = 'Normal'
# number of epochs already completed; used as the starting epoch when resuming training
self.completed_epochs = 0
+self.scope = fluid.global_scope()
def _get_single_card_bs(self, batch_size):
    if batch_size % len(self.places) == 0:
@@ -84,6 +85,10 @@ class BaseAPI:
        'place']))
def build_program(self):
+   if hasattr(paddlex, 'model_built') and paddlex.model_built:
+       logging.error(
+           "Function model.train() can only be called once in your code.")
+   paddlex.model_built = True
    # build the training network
    self.train_inputs, self.train_outputs = self.build_net(mode='train')
    self.train_prog = fluid.default_main_program()
@@ -155,7 +160,7 @@ class BaseAPI:
outputs=self.test_outputs,
batch_size=batch_size,
batch_nums=batch_num,
-scope=None,
+scope=self.scope,
algo='KL',
quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
is_full_quantize=False,
@@ -244,8 +249,8 @@ class BaseAPI:
logging.info(
    "Load pretrain weights from {}.".format(pretrain_weights),
    use_color=True)
-paddlex.utils.utils.load_pretrain_weights(
-    self.exe, self.train_prog, pretrain_weights, fuse_bn)
+paddlex.utils.utils.load_pretrain_weights(self.exe, self.train_prog,
+                                          pretrain_weights, fuse_bn)
# apply pruning
if sensitivities_file is not None:
    import paddleslim
@@ -349,10 +354,9 @@ class BaseAPI:
logging.info("Model saved in {}.".format(save_dir))
def export_inference_model(self, save_dir):
-   test_input_names = [
-       var.name for var in list(self.test_inputs.values())
-   ]
+   test_input_names = [var.name for var in list(self.test_inputs.values())]
    test_outputs = list(self.test_outputs.values())
+   with fluid.scope_guard(self.scope):
        if self.__class__.__name__ == 'MaskRCNN':
            from paddlex.utils.save import save_mask_inference_model
            save_mask_inference_model(
@@ -388,8 +392,7 @@ class BaseAPI:
# marker file indicating the model was saved successfully
open(osp.join(save_dir, '.success'), 'w').close()
-logging.info("Model for inference deploy saved in {}.".format(
-    save_dir))
+logging.info("Model for inference deploy saved in {}.".format(save_dir))
def train_loop(self,
               num_epochs,
@@ -513,12 +516,10 @@ class BaseAPI:
eta = ((num_epochs - i) * total_num_steps - step - 1
       ) * avg_step_time
if time_eval_one_epoch is not None:
-   eval_eta = (
-       total_eval_times - i // save_interval_epochs
-   ) * time_eval_one_epoch
+   eval_eta = (total_eval_times - i // save_interval_epochs
+               ) * time_eval_one_epoch
else:
-   eval_eta = (
-       total_eval_times - i // save_interval_epochs
-   ) * total_num_steps_eval * avg_step_time
+   eval_eta = (total_eval_times - i // save_interval_epochs
+               ) * total_num_steps_eval * avg_step_time
eta_str = seconds_to_hms(eta + eval_eta)
......
@@ -227,6 +227,7 @@ class BaseClassifier(BaseAPI):
true_labels = list()
pred_scores = list()
if not hasattr(self, 'parallel_test_prog'):
+   with fluid.scope_guard(self.scope):
        self.parallel_test_prog = fluid.CompiledProgram(
            self.test_prog).with_data_parallel(
                share_vars_from=self.parallel_train_prog)
@@ -242,7 +243,9 @@ class BaseClassifier(BaseAPI):
num_pad_samples = batch_size - num_samples
pad_images = np.tile(images[0:1], (num_pad_samples, 1, 1, 1))
images = np.concatenate([images, pad_images])
-outputs = self.exe.run(self.parallel_test_prog,
+with fluid.scope_guard(self.scope):
+   outputs = self.exe.run(
+       self.parallel_test_prog,
        feed={'image': images},
        fetch_list=list(self.test_outputs.values()))
outputs = [outputs[0][:num_samples]]
@@ -286,6 +289,7 @@ class BaseClassifier(BaseAPI):
self.arrange_transforms(
    transforms=self.test_transforms, mode='test')
im = self.test_transforms(img_file)
+with fluid.scope_guard(self.scope):
    result = self.exe.run(self.test_prog,
                          feed={'image': im},
                          fetch_list=list(self.test_outputs.values()),
......
@@ -317,19 +317,18 @@ class DeepLabv3p(BaseAPI):
    tuple (metrics, eval_details): when return_details is True, a dict (eval_details) is additionally returned,
        containing the key 'confusion_matrix', the confusion matrix of the evaluation.
"""
-self.arrange_transforms(
-    transforms=eval_dataset.transforms, mode='eval')
+self.arrange_transforms(transforms=eval_dataset.transforms, mode='eval')
total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
conf_mat = ConfusionMatrix(self.num_classes, streaming=True)
data_generator = eval_dataset.generator(
    batch_size=batch_size, drop_last=False)
if not hasattr(self, 'parallel_test_prog'):
+   with fluid.scope_guard(self.scope):
        self.parallel_test_prog = fluid.CompiledProgram(
            self.test_prog).with_data_parallel(
                share_vars_from=self.parallel_train_prog)
-logging.info(
-    "Start to evaluating(total_samples={}, total_steps={})...".format(
-        eval_dataset.num_samples, total_steps))
+logging.info("Start to evaluating(total_samples={}, total_steps={})...".
+             format(eval_dataset.num_samples, total_steps))
for step, data in tqdm.tqdm(
        enumerate(data_generator()), total=total_steps):
    images = np.array([d[0] for d in data])
@@ -350,7 +349,9 @@ class DeepLabv3p(BaseAPI):
pad_images = np.tile(images[0:1], (num_pad_samples, 1, 1, 1))
images = np.concatenate([images, pad_images])
feed_data = {'image': images}
-outputs = self.exe.run(self.parallel_test_prog,
+with fluid.scope_guard(self.scope):
+   outputs = self.exe.run(
+       self.parallel_test_prog,
        feed=feed_data,
        fetch_list=list(self.test_outputs.values()),
        return_numpy=True)
@@ -399,6 +400,7 @@ class DeepLabv3p(BaseAPI):
    transforms=self.test_transforms, mode='test')
im, im_info = self.test_transforms(im_file)
im = np.expand_dims(im, axis=0)
+with fluid.scope_guard(self.scope):
    result = self.exe.run(self.test_prog,
                          feed={'image': im},
                          fetch_list=list(self.test_outputs.values()),
......
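The docstring change above documents that `evaluate` can additionally return per-evaluation details. A rough usage sketch, assuming an already trained DeepLabv3p model and an existing evaluation dataset (the variable names and the exact `evaluate` signature are inferred from the docstring above, not verified against the full source):

```python
# `model` is assumed to be a trained paddlex.seg.DeepLabv3p instance and
# `eval_dataset` a matching segmentation dataset; both are illustrative only.
metrics, eval_details = model.evaluate(
    eval_dataset, batch_size=2, return_details=True)
print(metrics)
print(eval_details['confusion_matrix'])  # the confusion matrix mentioned in the docstring
```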
@@ -325,7 +325,9 @@ class FasterRCNN(BaseAPI):
    'im_info': im_infos,
    'im_shape': im_shapes,
}
-outputs = self.exe.run(self.test_prog,
+with fluid.scope_guard(self.scope):
+   outputs = self.exe.run(
+       self.test_prog,
        feed=[feed_data],
        fetch_list=list(self.test_outputs.values()),
        return_numpy=False)
@@ -388,6 +390,7 @@ class FasterRCNN(BaseAPI):
im = np.expand_dims(im, axis=0)
im_resize_info = np.expand_dims(im_resize_info, axis=0)
im_shape = np.expand_dims(im_shape, axis=0)
+with fluid.scope_guard(self.scope):
    outputs = self.exe.run(self.test_prog,
                           feed={
                               'image': im,
......
@@ -24,6 +24,7 @@ import paddlex.utils.logging as logging
def load_model(model_dir, fixed_input_shape=None):
+   model_scope = fluid.Scope()
    if not osp.exists(osp.join(model_dir, "model.yml")):
        raise Exception("There's not model.yml in {}".format(model_dir))
    with open(osp.join(model_dir, "model.yml")) as f:
@@ -51,6 +52,7 @@ def load_model(model_dir, fixed_input_shape=None):
            format(fixed_input_shape))
    model.fixed_input_shape = fixed_input_shape
+   with fluid.scope_guard(model_scope):
        if status == "Normal" or \
                status == "Prune" or status == "fluid.save":
            startup_prog = fluid.Program()
@@ -79,7 +81,8 @@ def load_model(model_dir, fixed_input_shape=None):
        model.test_inputs = OrderedDict()
        model.test_outputs = OrderedDict()
        for name in input_names:
-           model.test_inputs[name] = model.test_prog.global_block().var(name)
+           model.test_inputs[name] = model.test_prog.global_block().var(
+               name)
        for i, out in enumerate(outputs):
            var_desc = test_outputs_info[i]
            model.test_outputs[var_desc[0]] = out
@@ -107,6 +110,7 @@ def load_model(model_dir, fixed_input_shape=None):
        model.__dict__[k] = v
    logging.info("Model[{}] loaded.".format(info['Model']))
+   model.scope = model_scope
    model.trainable = False
    model.status = status
    return model
......
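The `load_model` changes above build every loaded model inside its own `fluid.Scope` and store it as `model.scope`, matching the `fluid.scope_guard(self.scope)` wrappers added around the executor calls elsewhere in this commit. A rough sketch of the pattern this enables, loading two exported models side by side without their persistable variables colliding (the model directories are placeholders):

```python
import paddlex as pdx

# Each load_model call creates a fresh fluid.Scope internally, so the two
# models' parameters live in separate scopes instead of the global scope.
model_a = pdx.load_model('./inference_model_a')
model_b = pdx.load_model('./inference_model_b')

# predict() runs the executor under fluid.scope_guard(model.scope), so each
# call only sees its own model's variables.
result_a = model_a.predict('test.jpg')
result_b = model_b.predict('test.jpg')
```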
@@ -286,7 +286,9 @@ class MaskRCNN(FasterRCNN):
    'im_info': im_infos,
    'im_shape': im_shapes,
}
-outputs = self.exe.run(self.test_prog,
+with fluid.scope_guard(self.scope):
+   outputs = self.exe.run(
+       self.test_prog,
        feed=[feed_data],
        fetch_list=list(self.test_outputs.values()),
        return_numpy=False)
@@ -356,6 +358,7 @@ class MaskRCNN(FasterRCNN):
im = np.expand_dims(im, axis=0)
im_resize_info = np.expand_dims(im_resize_info, axis=0)
im_shape = np.expand_dims(im_shape, axis=0)
+with fluid.scope_guard(self.scope):
    outputs = self.exe.run(self.test_prog,
                           feed={
                               'image': im,
......
@@ -154,8 +154,8 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
logging.info("Start to run batch!")
for data in self._data_loader():
    start = time.time()
-   self._executor.run(
-       program=self._program,
+   with fluid.scope_guard(self._scope):
+       self._executor.run(program=self._program,
                          feed=data,
                          fetch_list=self._fetch_list,
                          return_numpy=False)
@@ -164,10 +164,9 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
    else:
        self._sample_threshold()
    end = time.time()
-   logging.debug('[Run batch data] Batch={}/{}, time_each_batch={} s.'.format(
-       str(batch_id + 1),
-       str(batch_ct),
-       str(end-start)))
+   logging.debug(
+       '[Run batch data] Batch={}/{}, time_each_batch={} s.'.format(
+           str(batch_id + 1), str(batch_ct), str(end - start)))
    batch_id += 1
    if self._batch_nums and batch_id >= self._batch_nums:
        break
@@ -194,6 +193,7 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
Returns:
    None
'''
+with fluid.scope_guard(self._scope):
    feed_vars_names = [var.name for var in self._feed_list]
    fluid.io.save_inference_model(
        dirname=save_model_path,
@@ -212,7 +212,8 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
self._data_loader = fluid.io.DataLoader.from_generator(
    feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True)
self._data_loader.set_sample_list_generator(
-   self._dataset.generator(self._batch_size, drop_last=True),
+   self._dataset.generator(
+       self._batch_size, drop_last=True),
    places=self._place)
def _calculate_kl_threshold(self):
@@ -235,10 +236,12 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
weight_threshold.append(abs_max_value)
self._quantized_var_kl_threshold[var_name] = weight_threshold
end = time.time()
-logging.debug('[Calculate weight] Weight_id={}/{}, time_each_weight={} s.'.format(
-    str(ct),
-    str(len(self._quantized_weight_var_name)),
-    str(end-start)))
+logging.debug(
+    '[Calculate weight] Weight_id={}/{}, time_each_weight={} s.'.
+    format(
+        str(ct),
+        str(len(self._quantized_weight_var_name)), str(end -
+                                                       start)))
ct += 1
ct = 1
@@ -257,10 +260,12 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
self._quantized_var_kl_threshold[var_name] = \
    self._get_kl_scaling_factor(np.abs(sampling_data))
end = time.time()
-logging.debug('[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.format(
-    str(ct),
-    str(len(self._quantized_act_var_name)),
-    str(end-start)))
+logging.debug(
+    '[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.
+    format(
+        str(ct),
+        str(len(self._quantized_act_var_name)),
+        str(end - start)))
ct += 1
else:
for var_name in self._quantized_act_var_name:
@@ -270,10 +275,10 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
self._quantized_var_kl_threshold[var_name] = \
    self._get_kl_scaling_factor(np.abs(self._sampling_data[var_name]))
end = time.time()
-logging.debug('[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.format(
-    str(ct),
-    str(len(self._quantized_act_var_name)),
-    str(end-start)))
+logging.debug(
+    '[Calculate activation] Activation_id={}/{}, time_each_activation={} s.'.
+    format(
+        str(ct),
+        str(len(self._quantized_act_var_name)),
+        str(end - start)))
ct += 1
\ No newline at end of file
@@ -313,7 +313,9 @@ class YOLOv3(BaseAPI):
images = np.array([d[0] for d in data])
im_sizes = np.array([d[1] for d in data])
feed_data = {'image': images, 'im_size': im_sizes}
-outputs = self.exe.run(self.test_prog,
+with fluid.scope_guard(self.scope):
+   outputs = self.exe.run(
+       self.test_prog,
        feed=[feed_data],
        fetch_list=list(self.test_outputs.values()),
        return_numpy=False)
@@ -366,6 +368,7 @@ class YOLOv3(BaseAPI):
im, im_size = self.test_transforms(img_file)
im = np.expand_dims(im, axis=0)
im_size = np.expand_dims(im_size, axis=0)
+with fluid.scope_guard(self.scope):
    outputs = self.exe.run(self.test_prog,
                           feed={'image': im,
                                 'im_size': im_size},
......
@@ -73,7 +73,7 @@ def cls_compose(im, label=None, transforms=None, vdl_writer=None, step=0):
    raise TypeError('Can\'t read The image file {}!'.format(im))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if vdl_writer is not None:
-   vdl_writer.add_image(tag='0. OriginalImange/' + str(step),
+   vdl_writer.add_image(tag='0. OriginalImage/' + str(step),
                         img=im,
                         step=0)
op_id = 1
@@ -148,7 +148,7 @@ def det_compose(im, im_info=None, label_info=None, transforms=None, vdl_writer=N
if len(outputs) == 3:
    label_info = outputs[2]
if vdl_writer is not None:
-   vdl_writer.add_image(tag='0. OriginalImange/' + str(step),
+   vdl_writer.add_image(tag='0. OriginalImage/' + str(step),
                         img=im,
                         step=0)
op_id = 1
@@ -209,7 +209,7 @@ def det_compose(im, im_info=None, label_info=None, transforms=None, vdl_writer=N
if vdl_writer is not None:
    tag = str(op_id) + '. ' + op.__class__.__name__ + '/' + str(step)
    if op is None:
-       tag = str(op_id) + '. OriginalImangeWithGTBox/' + str(step)
+       tag = str(op_id) + '. OriginalImageWithGTBox/' + str(step)
    vdl_writer.add_image(tag=tag,
                         img=vdl_im,
                         step=0)
@@ -233,7 +233,7 @@ def seg_compose(im, im_info=None, label=None, transforms=None, vdl_writer=None,
if not isinstance(label, np.ndarray):
    label = np.asarray(Image.open(label))
if vdl_writer is not None:
-   vdl_writer.add_image(tag='0. OriginalImange' + '/' + str(step),
+   vdl_writer.add_image(tag='0. OriginalImage' + '/' + str(step),
                         img=im,
                         step=0)
op_id = 1
......
@@ -100,7 +100,7 @@ class LabelMe2COCO(X2COCO):
image["height"] = json_info["imageHeight"]
image["width"] = json_info["imageWidth"]
image["id"] = image_id + 1
-image["file_name"] = json_info["imagePath"].split("/")[-1]
+image["file_name"] = osp.split(json_info["imagePath"])[-1]
return image
def generate_polygon_anns_field(self, height, width,
......
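The `file_name` change above replaces `split("/")` with `osp.split`, so on Windows, where `os.path` understands back-slash separators, a LabelMe `imagePath` still reduces to the bare file name. A small illustration, using `ntpath` explicitly so the behaviour is reproducible on any platform (the path is made up):

```python
import ntpath  # os.path on Windows; used directly so this runs the same everywhere

image_path = "D:\\labelme\\imgs\\0001.jpg"  # hypothetical LabelMe imagePath

print(image_path.split("/")[-1])     # prints the whole path: D:\labelme\imgs\0001.jpg
print(ntpath.split(image_path)[-1])  # prints just the file name: 0001.jpg
```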