Unverified commit 7595690b authored by Lyon, committed by GitHub

Merge pull request #46 from Oneflow-Inc/of_develop_py3_luyang

Of develop py3 luyang
......@@ -601,7 +601,6 @@ python3 cnn_benchmark/of_cnn_train_val.py \
     --val_batch_size_per_device=512 \
     --num_epoch=90 \
     --use_fp16=false \
-    --use_boxing_v2=false \
     --model="alexnet" \
 ```
......@@ -630,7 +629,6 @@ python3 cnn_benchmark/of_cnn_train_val.py \
     --val_batch_size_per_device=128 \
     --num_epoch=90 \
     --use_fp16=false \
-    --use_boxing_v2=false \
     --model="vgg" \
 ```
......
......@@ -50,14 +50,6 @@ def get_parser(parser=None):
         const=True,
         help='Whether to use use fp16'
     )
-    parser.add_argument(
-        '--use_boxing_v2',
-        type=str2bool,
-        nargs='?',
-        const=True,
-        help='Whether to use boxing v2'
-    )
     parser.add_argument(
         '--channel_last',
         type=str2bool,
......
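The `--use_fp16` flag above (and the removed `--use_boxing_v2` flag) relies on argparse's `type=str2bool, nargs='?', const=True` pattern, which accepts the flag bare, with an explicit value, or not at all. Below is a minimal, self-contained sketch of that pattern; the `str2bool` body shown here is an assumed implementation, since the helper itself is not part of this diff:

```python
import argparse

def str2bool(v):
    # Assumed implementation: map common truthy/falsy strings to bool.
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected")

parser = argparse.ArgumentParser()
parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True,
                    help='Whether to use fp16')

print(parser.parse_args([]).use_fp16)                    # None  (flag omitted)
print(parser.parse_args(['--use_fp16']).use_fp16)        # True  (bare flag -> const)
print(parser.parse_args(['--use_fp16=false']).use_fp16)  # False (explicit value)
```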
......@@ -8,7 +8,7 @@ from optimizer_util import gen_model_update_conf
 def _default_config(args):
     config = flow.function_config()
-    config.default_distribute_strategy(flow.distribute.consistent_strategy())
+    config.default_distribute_strategy(flow.scope.consistent_view())
     config.default_data_type(flow.float)
     if args.use_fp16:
         config.enable_auto_mixed_precision(True)
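For context, a minimal sketch (not part of this diff) of how a `function_config` built this way is attached to a job; it mirrors the `@flow.global_function(config)` usage that appears in the BERT scripts further down, and the empty job body is a placeholder:

```python
import oneflow as flow

config = flow.function_config()
config.default_distribute_strategy(flow.scope.consistent_view())  # new API used by this PR
config.default_data_type(flow.float)
config.enable_auto_mixed_precision(True)  # only when --use_fp16 is requested

# The config is consumed by decorating a job function with it.
@flow.global_function(config)
def train_job():
    pass  # placeholder; the real jobs build the network and loss here
```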
......@@ -22,8 +22,6 @@ def get_train_config(args):
     train_config.all_reduce_group_min_mbyte(8)
     train_config.all_reduce_group_num(128)
-    if args.use_boxing_v2:
-        train_config.use_boxing_v2(True)
     train_config.prune_parallel_cast_ops(True)
     train_config.train.model_update_conf(gen_model_update_conf(args))
......
......@@ -43,8 +43,8 @@ def main():
     image = load_image(args.image_path)
     predictions = InferenceNet(image).get()
-    clsidx = predictions.ndarray().argmax()
-    print(predictions.ndarray().max(), clsidx_2_labels[clsidx])
+    clsidx = predictions.numpy().argmax()
+    print(predictions.numpy().max(), clsidx_2_labels[clsidx])

 if __name__ == "__main__":
......
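The change above only swaps the deprecated blob method `ndarray()` for `numpy()`; the post-processing logic is unchanged. A framework-free sketch of that logic, using a plain NumPy array in place of the blob and an assumed `clsidx_2_labels` mapping:

```python
import numpy as np

# Assumed stand-ins for this sketch only.
clsidx_2_labels = {0: "cat", 1: "dog", 2: "ship"}
predictions = np.array([0.05, 0.85, 0.10])  # e.g. softmax output for one image

clsidx = predictions.argmax()
print(predictions.max(), clsidx_2_labels[clsidx])  # 0.85 dog
```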
......@@ -34,10 +34,6 @@ model_dict = {
 flow.config.gpu_device_num(args.gpu_num_per_node)
 flow.config.enable_debug_mode(True)
-if args.use_boxing_v2:
-    flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-    flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)

 def label_smoothing(labels, classes, eta, dtype):
     assert classes > 0
......
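`label_smoothing` is only shown up to its first assertion here. As a point of reference, a plain-NumPy sketch of what a helper with this signature conventionally computes (the standard formulation, not necessarily the exact body in the repository): spread `eta` of the probability mass uniformly over all classes and keep `1 - eta` on the true class.

```python
import numpy as np

def label_smoothing_np(labels, classes, eta, dtype=np.float32):
    # labels: integer class indices, shape (batch,)
    assert classes > 0
    one_hot = np.eye(classes, dtype=dtype)[labels]
    return one_hot * (1.0 - eta) + eta / classes

print(label_smoothing_np(np.array([2]), classes=4, eta=0.1))
# [[0.025 0.025 0.925 0.025]]
```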
......@@ -141,7 +141,7 @@ def resnet50(images, trainable=True, need_transpose=False, training=True, wd=1.0
     if channel_last:
         # if channel_last=True, then change mode from 'nchw' to 'nhwc'
         images = flow.transpose(images, name="transpose", perm=[0, 2, 3, 1])
-    with flow.deprecated.variable_scope("Resnet"):
+    with flow.scope.namespace("Resnet"):
         stem = builder.resnet_stem(images)
         body = builder.resnet_conv_x_body(stem)
         pool5 = flow.nn.avg_pool2d(
......
......@@ -85,8 +85,8 @@ class StopWatch(object):
 def match_top_k(predictions, labels, top_k=1):
-    max_k_preds = np.argpartition(predictions.ndarray(), -top_k)[:, -top_k:]
-    match_array = np.logical_or.reduce(max_k_preds==labels.reshape((-1, 1)), axis=1)
+    max_k_preds = np.argpartition(predictions.numpy(), -top_k)[:, -top_k:]
+    match_array = np.logical_or.reduce(max_k_preds == labels.reshape((-1, 1)), axis=1)
     num_matched = match_array.sum()
     return num_matched, match_array.shape[0]
......
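Again only `ndarray()` → `numpy()` changes; the top-k matching itself is untouched. A small usage sketch with plain NumPy arrays standing in for the prediction and label blobs:

```python
import numpy as np

predictions = np.array([[0.10, 0.70, 0.20],   # sample 0: top-1 class is 1
                        [0.60, 0.30, 0.10]])  # sample 1: top-1 class is 0
labels = np.array([1, 2])

top_k = 1
max_k_preds = np.argpartition(predictions, -top_k)[:, -top_k:]
match_array = np.logical_or.reduce(max_k_preds == labels.reshape((-1, 1)), axis=1)
print(match_array.sum(), match_array.shape[0])  # 1 2 -> one of two samples matched
```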
......@@ -111,7 +111,6 @@ def _get_train_conf():
         'lazy_adam_conf': {
         }
     })
-    train_conf.use_boxing_v2(True)
     train_conf.default_distribute_strategy(flow.distribute.consistent_strategy())
     train_conf.indexed_slices_optimizer_conf(dict(include_op_names=dict(op_name=['wide_embedding', 'deep_embedding'])))
     return train_conf
......
......@@ -118,7 +118,6 @@ def _get_train_conf():
         'lazy_adam_conf': {
         }
     })
-    train_conf.use_boxing_v2(True)
     train_conf.default_distribute_strategy(flow.distribute.consistent_strategy())
     train_conf.indexed_slices_optimizer_conf(dict(include_op_names=dict(op_name=['wide_embedding', 'deep_embedding'])))
     return train_conf
......
......@@ -37,8 +37,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
......@@ -164,8 +162,6 @@ config.train.model_update_conf(_BERT_MODEL_UPDATE_CONF)
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
......@@ -206,9 +202,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......
......@@ -37,8 +37,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
......@@ -160,8 +158,6 @@ config.train.model_update_conf(_BERT_MODEL_UPDATE_CONF)
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
......@@ -202,9 +198,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......
......@@ -38,8 +38,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
......@@ -156,8 +154,6 @@ config.default_distribute_strategy(flow.distribute.consistent_strategy())
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
......@@ -198,9 +194,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......