Unverified commit 0a9ad8d7, authored by wanghuancoder, committed by GitHub

opt dygraph python code (#33997)

* opt dygraph python code, test=develop

* refine, test=develop
Parent 7858d332
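
Every hunk below applies the same refactor: setup that only the static-graph path needs (attrs dicts, dtype checks, LayerHelper and output-variable creation) is moved below the in_dygraph_mode() fast path, so a dygraph call goes straight into core.ops.* without building throwaway Python objects. The following is a minimal sketch of the before/after shape, written against the paddle.fluid internals this diff touches (circa Paddle 2.1); my_scale_before and my_scale_after are hypothetical wrappers around the real 'scale' operator, not functions from this PR.

# Sketch only: my_scale_* are made-up wrappers used to illustrate the refactor
# pattern of this commit; they are not Paddle APIs.
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_variable_and_dtype


def my_scale_before(x, scale=2.0):
    # Old shape: the attrs dict and the dtype check are built on every call,
    # even though the dygraph branch never uses them.
    attrs = {'scale': float(scale), 'bias': 0.0, 'bias_after_scale': True}
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'my_scale')
    if in_dygraph_mode():
        return core.ops.scale(x, 'scale', float(scale), 'bias', 0.0,
                              'bias_after_scale', True)
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='scale', inputs={'X': [x]}, outputs={'Out': [out]}, attrs=attrs)
    return out


def my_scale_after(x, scale=2.0):
    # New shape: the dygraph branch returns first; everything below it is
    # static-graph-only and is skipped entirely in dygraph mode.
    if in_dygraph_mode():
        return core.ops.scale(x, 'scale', float(scale), 'bias', 0.0,
                              'bias_after_scale', True)
    attrs = {'scale': float(scale), 'bias': 0.0, 'bias_after_scale': True}
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'my_scale')
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='scale', inputs={'X': [x]}, outputs={'Out': [out]}, attrs=attrs)
    return out

In dygraph mode both versions return the same tensor, but the second no longer pays for dict construction, dtype checking, or LayerHelper setup on each call; that per-call Python overhead is what this commit trims for the operators changed below.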
@@ -189,10 +189,12 @@ def barrier(group=None):
    ring_id = 0 if group is None else group.id
    op_type = 'barrier'
    temp = fill_constant([1], dtype="int32", value="1")
    if in_dygraph_mode():
        return core.ops.barrier(temp, temp, 'ring_id', ring_id)
    op_type = 'barrier'
    if not isinstance(ring_id, int):
        raise ValueError("The type of 'group' for barrier must be int.")
    helper = LayerHelper(op_type, **locals())
@@ -717,8 +719,6 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
    rank = _get_global_group().rank if group is None else group.rank
    nranks = _get_global_group().nranks if group is None else group.nranks
    op_type = 'c_scatter'
    if rank != gsrc:
        tensor_list = []
        for _ in range(nranks):
@@ -728,6 +728,7 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
        return core.ops.c_scatter(temp, tensor, 'use_calc_stream',
                                  use_calc_stream, 'ring_id', ring_id, 'nranks',
                                  nranks, 'root', gsrc)
    op_type = 'c_scatter'
    check_variable_and_dtype(
        tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'scatter')
@@ -1488,16 +1489,17 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
        return
    ring_id = 0 if group is None else group.id
    op_type = 'alltoall'
    temp = paddle.concat(in_tensor_list, axis=0)
    helper = LayerHelper(op_type, **locals())
    nranks = len(in_tensor_list)
    out = helper.create_variable_for_type_inference(
        dtype=in_tensor_list[0].dtype)
    if in_dygraph_mode():
        core.ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
                           ring_id)
    else:
        op_type = 'alltoall'
        helper = LayerHelper(op_type, **locals())
        out = helper.create_variable_for_type_inference(
            dtype=in_tensor_list[0].dtype)
        nranks = len(in_tensor_list)
        if not isinstance(in_tensor_list, list):
            raise ValueError("The type of 'in_tensor_list' for all_to_all "
                             "should be list.")
@@ -1554,10 +1556,10 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
        return
    ring_id = 0 if group is None else group.id
    op_type = 'send_v2'
    if in_dygraph_mode():
        return core.ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
                                'ring_id', ring_id, 'peer', dst)
    op_type = 'send_v2'
    check_variable_and_dtype(
        tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'send')
@@ -1604,11 +1606,11 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
        return
    ring_id = 0 if group is None else group.id
    op_type = 'recv_v2'
    if in_dygraph_mode():
        return core.ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
                                'ring_id', ring_id, 'peer', src, 'dtype',
                                tensor.dtype, 'out_shape', tensor.shape)
    op_type = 'recv_v2'
    check_variable_and_dtype(
        tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'recv')
@@ -1023,18 +1023,6 @@ def dropout(x,
    if dropout_prob == 0:
        return x
    def get_attrs(prog, dropout_prob, is_test, seed):
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        attrs = {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': dropout_implementation,
        }
        return attrs
    if in_dygraph_mode():
        if (seed is None or
                seed == 0) and default_main_program().random_seed != 0:
@@ -1047,6 +1035,18 @@ def dropout(x,
            'dropout_implementation', dropout_implementation)
        return out
    def get_attrs(prog, dropout_prob, is_test, seed):
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        attrs = {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': dropout_implementation,
        }
        return attrs
    helper = LayerHelper('dropout', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'dropout')
@@ -5131,12 +5131,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
            y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
            out = fluid.layers.matmul(x, y, True, True)
    """
    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }
    if in_dygraph_mode():
        out = _varbase_creator(dtype=x.dtype)
        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
@@ -5179,6 +5173,12 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))
    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }
    __check_input(x, y)
    helper = LayerHelper('matmul', **locals())
@@ -9387,16 +9387,16 @@ def pad2d(input,
            # [5. 4. 5. 6. 5.]
            # [2. 1. 2. 3. 2.]]]]
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")
    if in_dygraph_mode():
        _paddings = paddings.numpy().tolist() if isinstance(
            paddings, Variable) else paddings
        return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                              'data_format', data_format, 'paddings', _paddings)
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")
    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
    inputs = {'X': [input]}
    if isinstance(paddings, Variable):
@@ -14106,11 +14106,11 @@ def where(condition):
            out = layers.where(condition) # [[]]
    """
    helper = LayerHelper("where_index", **locals())
    if in_dygraph_mode():
        return core.ops.where_index(condition)
    helper = LayerHelper("where_index", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
@@ -1403,11 +1403,6 @@ def range(start, end, step, dtype, name=None):
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
@@ -1429,6 +1424,11 @@ def range(start, end, step, dtype, name=None):
    if in_dygraph_mode():
        return core.ops.range(start, end, step)
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]
    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
@@ -715,20 +715,11 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
    else:
        reduce_all_flag = False
    attrs = {
        'dim': axis if axis != None and axis != [] and axis != () else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag
    }
    dtype_flag = False
    if dtype is not None:
        if dtype in ['float64', 'int64']:
            if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \
                (convert_dtype(x.dtype) == "int32" and dtype == "int64"):
                attrs.update({
                    'in_dtype': x.dtype,
                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
                })
                dtype_flag = True
    if in_dygraph_mode():
@@ -741,6 +732,22 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
        else:
            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                       'reduce_all', reduce_all_flag)
    attrs = {
        'dim': axis if axis != None and axis != [] and axis != () else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag
    }
    if dtype is not None:
        if dtype in ['float64', 'int64']:
            if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \
                (convert_dtype(x.dtype) == "int32" and dtype == "int64"):
                attrs.update({
                    'in_dtype': x.dtype,
                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
                })
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sum')
@@ -1648,6 +1655,9 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
    """
    if in_dygraph_mode():
        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    inputs = {'Input': [x]}
    attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
@@ -1678,10 +1688,6 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
                "axis1 and axis2 cannot be the same axis." \
                "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    if in_dygraph_mode():
        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    if not in_dygraph_mode():
        __check_input(input, offset, axis1, axis2)
    helper = LayerHelper('trace', **locals())
@@ -1761,6 +1767,9 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
            # [0.17020577, 0.27325270]])
    """
    if in_dygraph_mode():
        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    def __check_input(input, offset, dim1, dim2):
        check_dtype(x.dtype, 'Input',
                    ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
@@ -1787,9 +1796,6 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
                "axis1 and axis2 cannot be the same axis." \
                "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    if in_dygraph_mode():
        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    __check_input(input, offset, axis1, axis2)
    helper = LayerHelper('diagonal', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2247,18 +2253,17 @@ def all(x, axis=None, keepdim=False, name=None):
    else:
        reduce_all_flag = False
    if in_dygraph_mode():
        axis = axis if axis != None and axis != [] else [0]
        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
                                   'reduce_all', reduce_all_flag)
    attrs = {
        'dim': axis if axis != None and axis != [] and axis != () else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag
    }
    dtype_flag = False
    if in_dygraph_mode():
        axis = axis if axis != None and axis != [] else [0]
        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
                                   'reduce_all', reduce_all_flag)
    check_variable_and_dtype(x, 'x', ['bool'], 'all')
@@ -2341,18 +2346,17 @@ def any(x, axis=None, keepdim=False, name=None):
    else:
        reduce_all_flag = False
    if in_dygraph_mode():
        axis = axis if axis != None and axis != [] else [0]
        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
                                   'reduce_all', reduce_all_flag)
    attrs = {
        'dim': axis if axis != None and axis != [] and axis != () else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag
    }
    dtype_flag = False
    if in_dygraph_mode():
        axis = axis if axis != None and axis != [] else [0]
        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
                                   'reduce_all', reduce_all_flag)
    check_variable_and_dtype(x, 'x', ['bool'], 'any')