Unverified commit 0a9ad8d7 · Author: wanghuancoder · Committer: GitHub

opt dygraph python code (#33997)

* opt dygraph python code, test=develop

* refine, test=develop
Parent 7858d332
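
Every hunk below applies the same refactor: the dynamic-graph (dygraph) fast path runs first and returns immediately, and the static-graph-only setup (`op_type` strings, `attrs` dicts, `LayerHelper` construction, dtype checks) is moved after that early return so eager-mode calls never pay for it. A minimal sketch of the pattern, using stand-in names rather than the real Paddle symbols:

```python
# Stand-ins for illustration only; not the real Paddle APIs.
DYGRAPH = True

def in_dygraph_mode():
    return DYGRAPH

def fast_kernel(x, scale):
    return x * scale  # placeholder for a core.ops.* call

# Before: the attrs dict is built even though the eager branch never uses it.
def scale_before(x, scale=2.0):
    attrs = {'scale': scale}           # wasted work in dygraph mode
    if in_dygraph_mode():
        return fast_kernel(x, scale)
    return ('static-graph op', attrs)  # placeholder for LayerHelper.append_op

# After: the eager branch returns before any static-graph setup runs.
def scale_after(x, scale=2.0):
    if in_dygraph_mode():
        return fast_kernel(x, scale)
    attrs = {'scale': scale}           # only built on the static-graph path
    return ('static-graph op', attrs)

print(scale_after(3.0))  # 6.0 in the (simulated) dygraph mode
```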
@@ -189,10 +189,12 @@ def barrier(group=None):
     ring_id = 0 if group is None else group.id
-    op_type = 'barrier'
     temp = fill_constant([1], dtype="int32", value="1")
     if in_dygraph_mode():
         return core.ops.barrier(temp, temp, 'ring_id', ring_id)
+    op_type = 'barrier'
     if not isinstance(ring_id, int):
         raise ValueError("The type of 'group' for barrier must be int.")
     helper = LayerHelper(op_type, **locals())
@@ -717,8 +719,6 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
     rank = _get_global_group().rank if group is None else group.rank
     nranks = _get_global_group().nranks if group is None else group.nranks
-    op_type = 'c_scatter'
     if rank != gsrc:
         tensor_list = []
         for _ in range(nranks):
@@ -728,6 +728,7 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
         return core.ops.c_scatter(temp, tensor, 'use_calc_stream',
                                   use_calc_stream, 'ring_id', ring_id, 'nranks',
                                   nranks, 'root', gsrc)
+    op_type = 'c_scatter'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
         'scatter')
@@ -1488,16 +1489,17 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
         return
     ring_id = 0 if group is None else group.id
-    op_type = 'alltoall'
     temp = paddle.concat(in_tensor_list, axis=0)
-    helper = LayerHelper(op_type, **locals())
-    nranks = len(in_tensor_list)
-    out = helper.create_variable_for_type_inference(
-        dtype=in_tensor_list[0].dtype)
     if in_dygraph_mode():
         core.ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
                            ring_id)
     else:
+        op_type = 'alltoall'
+        helper = LayerHelper(op_type, **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=in_tensor_list[0].dtype)
+        nranks = len(in_tensor_list)
         if not isinstance(in_tensor_list, list):
             raise ValueError("The type of 'in_tensor_list' for all_to_all "
                              "should be list.")
@@ -1554,10 +1556,10 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
         return
     ring_id = 0 if group is None else group.id
-    op_type = 'send_v2'
     if in_dygraph_mode():
         return core.ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
                                 'ring_id', ring_id, 'peer', dst)
+    op_type = 'send_v2'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
         'send')
@@ -1604,11 +1606,11 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
         return
     ring_id = 0 if group is None else group.id
-    op_type = 'recv_v2'
     if in_dygraph_mode():
         return core.ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
                                 'ring_id', ring_id, 'peer', src, 'dtype',
                                 tensor.dtype, 'out_shape', tensor.shape)
+    op_type = 'recv_v2'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
         'recv')
...
@@ -1023,18 +1023,6 @@ def dropout(x,
     if dropout_prob == 0:
         return x
-    def get_attrs(prog, dropout_prob, is_test, seed):
-        if (seed is None or seed == 0) and prog.random_seed != 0:
-            seed = prog.random_seed
-        attrs = {
-            'dropout_prob': dropout_prob,
-            'is_test': is_test,
-            'fix_seed': seed is not None,
-            'seed': seed if seed is not None else 0,
-            'dropout_implementation': dropout_implementation,
-        }
-        return attrs
     if in_dygraph_mode():
         if (seed is None or
                 seed == 0) and default_main_program().random_seed != 0:
@@ -1047,6 +1035,18 @@ def dropout(x,
                      'dropout_implementation', dropout_implementation)
         return out
+    def get_attrs(prog, dropout_prob, is_test, seed):
+        if (seed is None or seed == 0) and prog.random_seed != 0:
+            seed = prog.random_seed
+        attrs = {
+            'dropout_prob': dropout_prob,
+            'is_test': is_test,
+            'fix_seed': seed is not None,
+            'seed': seed if seed is not None else 0,
+            'dropout_implementation': dropout_implementation,
+        }
+        return attrs
     helper = LayerHelper('dropout', **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'dropout')
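
In `dropout`, the nested `def get_attrs(...)` used to execute on every call, and a nested `def` statement creates a fresh function object each time. Moving the definition below the dygraph return removes that allocation from the eager path while leaving the static-graph behavior unchanged. A stand-alone sketch of the effect (not the Paddle dropout implementation):

```python
import timeit

def outer_with_nested_def():
    def get_attrs(prob, is_test, seed):
        return {'dropout_prob': prob, 'is_test': is_test, 'seed': seed or 0}
    return 1  # mimics the eager branch returning without calling get_attrs

def outer_without_nested_def():
    return 1

print('with nested def:   ', timeit.timeit(outer_with_nested_def, number=1_000_000))
print('without nested def:', timeit.timeit(outer_without_nested_def, number=1_000_000))
```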
@@ -5131,12 +5131,6 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
             y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
             out = fluid.layers.matmul(x, y, True, True)
     """
-    attrs = {
-        'transpose_X': transpose_x,
-        'transpose_Y': transpose_y,
-        'alpha': float(alpha),
-    }
     if in_dygraph_mode():
         out = _varbase_creator(dtype=x.dtype)
         core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
@@ -5179,6 +5173,12 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
                     "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                     "Y's shape: %s.\n" % (i, i, x_shape, y_shape))
+    attrs = {
+        'transpose_X': transpose_x,
+        'transpose_Y': transpose_y,
+        'alpha': float(alpha),
+    }
     __check_input(x, y)
     helper = LayerHelper('matmul', **locals())
@@ -9387,16 +9387,16 @@ def pad2d(input,
          #    [5. 4. 5. 6. 5.]
          #    [2. 1. 2. 3. 2.]]]]
     """
-    check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
-        "pad2d")
     if in_dygraph_mode():
         _paddings = paddings.numpy().tolist() if isinstance(
             paddings, Variable) else paddings
         return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                               'data_format', data_format, 'paddings', _paddings)
+    check_variable_and_dtype(
+        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        "pad2d")
     attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
     inputs = {'X': [input]}
     if isinstance(paddings, Variable):
@@ -14106,11 +14106,11 @@ def where(condition):
             out = layers.where(condition) # [[]]
     """
-    helper = LayerHelper("where_index", **locals())
     if in_dygraph_mode():
         return core.ops.where_index(condition)
+    helper = LayerHelper("where_index", **locals())
     out = helper.create_variable_for_type_inference(
         dtype=core.VarDesc.VarType.INT64)
...
@@ -1403,11 +1403,6 @@ def range(start, end, step, dtype, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
-    out_shape = None
-    if not isinstance(start, Variable) and not isinstance(
-            end, Variable) and not isinstance(step, Variable):
-        out_shape = [int(math.ceil((end - start) / step))]
     if not isinstance(start, Variable):
         with device_guard("cpu"):
             start = fill_constant([1], dtype, start, force_cpu=True)
@@ -1429,6 +1424,11 @@ def range(start, end, step, dtype, name=None):
     if in_dygraph_mode():
         return core.ops.range(start, end, step)
+    out_shape = None
+    if not isinstance(start, Variable) and not isinstance(
+            end, Variable) and not isinstance(step, Variable):
+        out_shape = [int(math.ceil((end - start) / step))]
     check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                 'range/arange')
     helper = LayerHelper('range', **locals())
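
For `range`, the eager kernel infers the output size itself, so the `out_shape` computation is only needed when building a static-graph op and now runs after the dygraph return. The length formula used there, shown stand-alone for reference (a sketch, not Paddle code):

```python
import math

def range_length(start, end, step):
    """Number of elements produced by range(start, end, step), matching the
    out_shape computation in the hunk above."""
    return [int(math.ceil((end - start) / step))]

print(range_length(0, 10, 3))        # [4] -> values 0, 3, 6, 9
print(range_length(1.0, 2.0, 0.25))  # [4] -> 1.0, 1.25, 1.5, 1.75
```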
...
@@ -715,20 +715,11 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False
-    attrs = {
-        'dim': axis if axis != None and axis != [] and axis != () else [0],
-        'keep_dim': keepdim,
-        'reduce_all': reduce_all_flag
-    }
     dtype_flag = False
     if dtype is not None:
         if dtype in ['float64', 'int64']:
             if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \
                (convert_dtype(x.dtype) == "int32" and dtype == "int64"):
-                attrs.update({
-                    'in_dtype': x.dtype,
-                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
-                })
                 dtype_flag = True
     if in_dygraph_mode():
@@ -741,6 +732,22 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
         else:
             return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                        'reduce_all', reduce_all_flag)
+    attrs = {
+        'dim': axis if axis != None and axis != [] and axis != () else [0],
+        'keep_dim': keepdim,
+        'reduce_all': reduce_all_flag
+    }
+    if dtype is not None:
+        if dtype in ['float64', 'int64']:
+            if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \
+               (convert_dtype(x.dtype) == "int32" and dtype == "int64"):
+                attrs.update({
+                    'in_dtype': x.dtype,
+                    'out_dtype': convert_np_dtype_to_dtype_(dtype)
+                })
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sum')
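
In `sum`, the eager branch passes `dim`, `keep_dim`, `reduce_all` (and the dtype pair when promoting) directly to `core.ops.reduce_sum`, so the Python-side `attrs` dict and the `attrs.update` for the float32→float64 / int32→int64 promotion are only needed when appending a static-graph op; building them is now deferred until after the fast path. A stand-alone sketch of that attrs construction (dtypes are plain strings here, not Paddle dtypes):

```python
def build_reduce_sum_attrs(axis, keepdim, reduce_all_flag,
                           x_dtype=None, out_dtype=None):
    attrs = {
        'dim': axis if axis not in (None, [], ()) else [0],
        'keep_dim': keepdim,
        'reduce_all': reduce_all_flag,
    }
    # Only the int32->int64 and float32->float64 promotions add cast attrs,
    # mirroring the condition in the hunk above.
    if (x_dtype, out_dtype) in (('float32', 'float64'), ('int32', 'int64')):
        attrs.update({'in_dtype': x_dtype, 'out_dtype': out_dtype})
    return attrs

print(build_reduce_sum_attrs([1], False, False))
print(build_reduce_sum_attrs(None, True, True, 'int32', 'int64'))
```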
@@ -1648,6 +1655,9 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
     """
+    if in_dygraph_mode():
+        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
     inputs = {'Input': [x]}
     attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
@@ -1678,11 +1688,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
            "axis1 and axis2 cannot be the same axis." \
            "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
-    if in_dygraph_mode():
-        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
-    if not in_dygraph_mode():
-        __check_input(input, offset, axis1, axis2)
+    __check_input(input, offset, axis1, axis2)
     helper = LayerHelper('trace', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
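
Besides hoisting the fast path to the top of `trace`, this hunk also drops a redundant guard: once the `in_dygraph_mode()` branch returns, everything after it can only run in static-graph mode, so wrapping `__check_input` in `if not in_dygraph_mode():` added nothing. A generic sketch of that early-return simplification (illustrative code, not Paddle's):

```python
def api(x, eager=False):
    if eager:
        return x * 2  # fast path returns immediately

    # Everything below only runs when eager is False; no extra guard needed.
    assert isinstance(x, (int, float)), "x must be a number"
    return ('build static-graph program for', x)

print(api(3, eager=True))   # 6
print(api(3, eager=False))  # ('build static-graph program for', 3)
```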
@@ -1761,6 +1767,9 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
            #        [0.17020577, 0.27325270]])
     """
+    if in_dygraph_mode():
+        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
     def __check_input(input, offset, dim1, dim2):
         check_dtype(x.dtype, 'Input',
                     ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
@@ -1787,9 +1796,6 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
            "axis1 and axis2 cannot be the same axis." \
            "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
-    if in_dygraph_mode():
-        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
     __check_input(input, offset, axis1, axis2)
     helper = LayerHelper('diagonal', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2247,18 +2253,17 @@ def all(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False
+    if in_dygraph_mode():
+        axis = axis if axis != None and axis != [] else [0]
+        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
+                                   'reduce_all', reduce_all_flag)
     attrs = {
         'dim': axis if axis != None and axis != [] and axis != () else [0],
         'keep_dim': keepdim,
         'reduce_all': reduce_all_flag
     }
-    dtype_flag = False
-    if in_dygraph_mode():
-        axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
-                                   'reduce_all', reduce_all_flag)
     check_variable_and_dtype(x, 'x', ['bool'], 'all')
@@ -2341,18 +2346,17 @@ def any(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False
+    if in_dygraph_mode():
+        axis = axis if axis != None and axis != [] else [0]
+        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
+                                   'reduce_all', reduce_all_flag)
     attrs = {
         'dim': axis if axis != None and axis != [] and axis != () else [0],
         'keep_dim': keepdim,
         'reduce_all': reduce_all_flag
     }
-    dtype_flag = False
-    if in_dygraph_mode():
-        axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
-                                   'reduce_all', reduce_all_flag)
     check_variable_and_dtype(x, 'x', ['bool'], 'any')
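
For `all` and `any`, the dygraph fast path moves above the `attrs` construction and the unused `dtype_flag = False` (these reductions never cast) is removed. To sanity-check the overall effect of the commit locally, a rough eager-mode micro-benchmark could look like the sketch below (assumes a PaddlePaddle install with dygraph mode on by default; absolute numbers depend on the build):

```python
import timeit
import paddle

x = paddle.ones([64, 64], dtype='float32')

def call_sum():
    return paddle.sum(x, axis=1)

print('paddle.sum, 10k eager calls:', timeit.timeit(call_sum, number=10_000))
```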
...