提交 0dfeb391 编写于 作者: B baiyf

Port to Python 3 (replace `xrange`/`iteritems`/`print` statement with `six` shims and `print()`; use floor division `//` where integer results are required).

上级 0db126d8
...@@ -59,7 +59,7 @@ def train(args, config, train_file_list, optimizer_method): ...@@ -59,7 +59,7 @@ def train(args, config, train_file_list, optimizer_method):
loss = network.vgg_ssd_loss() loss = network.vgg_ssd_loss()
fetches = [loss] fetches = [loss]
epocs = 12880 / batch_size epocs = 12880 // batch_size
boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100] boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100]
values = [ values = [
learning_rate, learning_rate * 0.5, learning_rate * 0.25, learning_rate, learning_rate * 0.5, learning_rate * 0.25,
......
import numpy as np import numpy as np
import six
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier from paddle.fluid.initializer import Xavier
...@@ -27,13 +27,13 @@ def conv_block(input, groups, filters, ksizes, strides=None, with_pool=True): ...@@ -27,13 +27,13 @@ def conv_block(input, groups, filters, ksizes, strides=None, with_pool=True):
w_attr = ParamAttr(learning_rate=1., initializer=Xavier()) w_attr = ParamAttr(learning_rate=1., initializer=Xavier())
b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.)) b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))
conv = input conv = input
for i in xrange(groups): for i in six.moves.xrange(groups):
conv = fluid.layers.conv2d( conv = fluid.layers.conv2d(
input=conv, input=conv,
num_filters=filters[i], num_filters=filters[i],
filter_size=ksizes[i], filter_size=ksizes[i],
stride=strides[i], stride=strides[i],
padding=(ksizes[i] - 1) / 2, padding=(ksizes[i] - 1) // 2,
param_attr=w_attr, param_attr=w_attr,
bias_attr=b_attr, bias_attr=b_attr,
act='relu') act='relu')
...@@ -220,7 +220,7 @@ class PyramidBox(object): ...@@ -220,7 +220,7 @@ class PyramidBox(object):
def permute_and_reshape(input, last_dim): def permute_and_reshape(input, last_dim):
trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1]) trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1])
compile_shape = [ compile_shape = [
trans.shape[0], np.prod(trans.shape[1:]) / last_dim, last_dim trans.shape[0], np.prod(trans.shape[1:]) // last_dim, last_dim
] ]
run_shape = fluid.layers.assign( run_shape = fluid.layers.assign(
np.array([0, -1, last_dim]).astype("int32")) np.array([0, -1, last_dim]).astype("int32"))
...@@ -291,7 +291,7 @@ class PyramidBox(object): ...@@ -291,7 +291,7 @@ class PyramidBox(object):
def permute_and_reshape(input, last_dim): def permute_and_reshape(input, last_dim):
trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1]) trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1])
compile_shape = [ compile_shape = [
trans.shape[0], np.prod(trans.shape[1:]) / last_dim, last_dim trans.shape[0], np.prod(trans.shape[1:]) // last_dim, last_dim
] ]
run_shape = fluid.layers.assign( run_shape = fluid.layers.assign(
np.array([0, -1, last_dim]).astype("int32")) np.array([0, -1, last_dim]).astype("int32"))
......
...@@ -24,6 +24,7 @@ import time ...@@ -24,6 +24,7 @@ import time
import copy import copy
import random import random
import cv2 import cv2
import six
from data_util import GeneratorEnqueuer from data_util import GeneratorEnqueuer
...@@ -151,7 +152,7 @@ def preprocess(img, bbox_labels, mode, settings, image_path): ...@@ -151,7 +152,7 @@ def preprocess(img, bbox_labels, mode, settings, image_path):
mirror = int(random.uniform(0, 2)) mirror = int(random.uniform(0, 2))
if mirror == 1: if mirror == 1:
img = img[:, ::-1, :] img = img[:, ::-1, :]
for i in xrange(len(sampled_labels)): for i in six.moves.xrange(len(sampled_labels)):
tmp = sampled_labels[i][1] tmp = sampled_labels[i][1]
sampled_labels[i][1] = 1 - sampled_labels[i][3] sampled_labels[i][1] = 1 - sampled_labels[i][3]
sampled_labels[i][3] = 1 - tmp sampled_labels[i][3] = 1 - tmp
......
...@@ -57,7 +57,7 @@ def train(args, config, train_file_list, optimizer_method): ...@@ -57,7 +57,7 @@ def train(args, config, train_file_list, optimizer_method):
loss = network.vgg_ssd_loss() loss = network.vgg_ssd_loss()
fetches = [loss] fetches = [loss]
steps_per_pass = 12880 / batch_size steps_per_pass = 12880 // batch_size
boundaries = [steps_per_pass * 50, steps_per_pass * 80, boundaries = [steps_per_pass * 50, steps_per_pass * 80,
steps_per_pass * 120, steps_per_pass * 140] steps_per_pass * 120, steps_per_pass * 140]
values = [ values = [
...@@ -110,7 +110,7 @@ def train(args, config, train_file_list, optimizer_method): ...@@ -110,7 +110,7 @@ def train(args, config, train_file_list, optimizer_method):
model_path = os.path.join(model_save_dir, postfix) model_path = os.path.join(model_save_dir, postfix)
if os.path.isdir(model_path): if os.path.isdir(model_path):
shutil.rmtree(model_path) shutil.rmtree(model_path)
print 'save models to %s' % (model_path) print('save models to %s' % (model_path))
fluid.io.save_persistables(exe, model_path) fluid.io.save_persistables(exe, model_path)
def tensor(data, place, lod=None): def tensor(data, place, lod=None):
......
...@@ -16,6 +16,7 @@ from __future__ import absolute_import ...@@ -16,6 +16,7 @@ from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import distutils.util import distutils.util
import six
def print_arguments(args): def print_arguments(args):
...@@ -34,7 +35,7 @@ def print_arguments(args): ...@@ -34,7 +35,7 @@ def print_arguments(args):
:type args: argparse.Namespace :type args: argparse.Namespace
""" """
print("----------- Configuration Arguments -----------") print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).iteritems()): for arg, value in sorted(six.iteritems(vars(args))):
print("%s: %s" % (arg, value)) print("%s: %s" % (arg, value))
print("------------------------------------------------") print("------------------------------------------------")
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册