Unverified commit 310edc0d authored by Leo Chen, committed by GitHub

Update layers used in ptb model to use auto-generated op functions in dygraph mode (#21724)

* update layers, test=develop

* fix input numpy, test=develop

* fix bugs, test=develop

* follow comments, test=develop

* update getitem, test=develop
Parent f17bd178
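
For context, every layer touched by this PR follows the same pattern: in dygraph mode it calls the auto-generated op function under core.ops directly, instead of building program ops through LayerHelper.append_op. Below is a minimal, self-contained sketch of that pattern, restating the Embedding.forward change from this diff (the free-standing function and its argument names are illustrative only):

    import paddle.fluid as fluid
    from paddle.fluid import core
    from paddle.fluid.framework import in_dygraph_mode

    def embedding_forward(layer, input):
        # Attributes are shared by the dygraph and static-graph paths.
        attrs = {
            'is_sparse': layer._is_sparse,
            'is_distributed': layer._is_distributed,
            'remote_prefetch': layer._remote_prefetch,
            'padding_idx': layer._padding_idx,
        }
        if in_dygraph_mode():
            # Dygraph: call the auto-generated op function eagerly.
            # Inputs and outputs are dicts mapping slot names to lists of VarBase.
            inputs = {'Ids': [input], 'W': [layer._w]}
            outs = core.ops.lookup_table_v2(inputs, attrs)
            return outs['Out'][0]
        # Static graph: fall back to appending an op to the program.
        out = layer._helper.create_variable_for_type_inference(layer._dtype)
        layer._helper.append_op(
            type='lookup_table_v2',
            inputs={'Ids': input, 'W': layer._w},
            outputs={'Out': out},
            attrs=attrs)
        return out
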
......@@ -188,7 +188,25 @@ class VarBase {
}
}
framework::proto::VarType::Type DataType() const { return data_type_; }
framework::proto::VarType::Type DataType() const {
const framework::Tensor* tensor = nullptr;
if (var_.IsInitialized()) {
if (type_ == framework::proto::VarType::LOD_TENSOR) {
tensor = &(var_.Get<framework::LoDTensor>());
} else if (type_ == framework::proto::VarType::SELECTED_ROWS) {
tensor = &(var_.Get<framework::SelectedRows>().value());
} else {
VLOG(6) << "Variable " << name_ << " is not initialized";
return data_type_;
}
}
if (tensor && tensor->IsInitialized()) {
return tensor->type();
} else {
VLOG(6) << "The tensor of variable " << name_ << " is not initialized";
return data_type_;
}
}
void ClearGradient();
......
......@@ -182,9 +182,9 @@ class Layer(core.Layer):
if parallel_helper._is_data_parallel_mode():
parallel_helper._broadcast_parameters(
self._parameters.values())
self._built = True
outputs = self.forward(*inputs, **kwargs)
self._built = True
return outputs
def forward(self, *inputs, **kwargs):
......
......@@ -216,7 +216,8 @@ def monkey_patch_math_varbase():
setattr(core.VarBase, method_name,
_elemwise_method_creator_(method_name, op_type, reverse,
scalar_method)),
scalar_method))
# b = -a
core.VarBase.__neg__ = _neg_
core.VarBase.astype = astype
......@@ -19,7 +19,7 @@ from six.moves import reduce
from .. import core
from ..layers import utils
from . import layers
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer_
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant, NumpyArrayInitializer
import numpy as np
......@@ -1542,18 +1542,24 @@ class Embedding(layers.Layer):
self._w = value
def forward(self, input):
attrs = {
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
}
if in_dygraph_mode():
inputs = {'Ids': [input], 'W': [self._w]}
outs = core.ops.lookup_table_v2(inputs, attrs)
return outs['Out'][0]
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table_v2',
inputs={'Ids': input,
'W': self._w},
outputs={'Out': out},
attrs={
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
})
attrs=attrs)
return out
......
......@@ -208,7 +208,67 @@ def monkey_patch_varbase():
self.shape)
def __getitem__(self, item):
return _getitem_impl_(self, item)
if not isinstance(item, tuple):
item = [item]
decrease_axis = []
slice_axis = []
slice_start = []
slice_end = []
reverse_axis = []
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step if slice_item.step else 1
assert (step == 1 or step == -1)
if step == -1:
reverse_axis.append(dim)
assert (start is None and end is None)
if start is None and end is None:
continue
if start is None:
start = 0
if end is None:
end = 10000000
slice_axis.append(dim)
slice_start.append(start)
slice_end.append(end)
else:
# int
decrease_axis.append(dim)
slice_axis.append(dim)
slice_start.append(slice_item)
slice_end.append(slice_item + 1
if slice_item != -1 else 10000000)
out = self
if len(slice_axis) > 0:
# append slice_op here
inputs = {'Input': [out]}
attrs = {
'axes': slice_axis,
'starts': slice_start,
'ends': slice_end,
'decrease_axis': decrease_axis
}
outs = core.ops.slice(inputs, attrs)
out = outs['Out'][0]
if len(reverse_axis) > 0:
inputs = {'X': [out]}
attrs = {'axis': reverse_axis}
outs = core.ops.reverse(inputs, attrs)
out = outs['Out'][0]
return out
for method_name, method in (("set_value", set_value), ("block", block),
("backward", backward), ("gradient", gradient),
......
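
The __getitem__ patch above makes basic indexing on a dygraph VarBase run eagerly through the slice and reverse ops. A short usage sketch, mirroring the new assertions in the VarBase unit test at the end of this diff (assumes a dygraph guard is active):

    import numpy as np
    import paddle.fluid as fluid

    array = np.arange(12).reshape(3, 4).astype('float32')
    with fluid.dygraph.guard():
        var = fluid.dygraph.to_variable(array)
        row = var[1, :]   # int index plus full slice -> slice op with decrease_axis
        rev = var[::-1]   # step == -1 over the whole range -> reverse op
        assert np.array_equal(row.numpy(), array[1, :])
        assert np.array_equal(rev.numpy(), array[::-1])
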
......@@ -274,16 +274,15 @@ class GradClipByGlobalNorm(GradClipBase):
norm_global = layers.reduce_sum(norm_global)
norm_global = layers.sqrt(norm_global)
clip_scale = layers.elementwise_div(
x=self.max_global_norm,
y=layers.elementwise_max(
x=norm_global, y=self.max_global_norm))
clip_scale = self.max_global_norm / (layers.elementwise_max(
x=norm_global, y=self.max_global_norm))
for p, g in para_and_grad:
if g is None:
out.append((p, g))
continue
new_grad = layers.elementwise_mul(x=g, y=clip_scale)
new_grad = g * clip_scale
out.append((p, new_grad))
......
......@@ -15,6 +15,8 @@
from __future__ import print_function
from . import framework
from . import core
from .framework import in_dygraph_mode
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
from .core import VarDesc
......
......@@ -20,7 +20,7 @@ import string
from six.moves import cStringIO
from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_
from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
from ..layer_helper import LayerHelper
from ..data_feeder import check_type_and_dtype
......@@ -252,9 +252,16 @@ def generate_activation_fn(op_type):
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
def func(x, name=None):
helper = LayerHelper(op_type, **locals())
if in_dygraph_mode():
inputs = {'X': [x]}
op = getattr(core.ops, op_type)
outs = op(inputs)
return outs['Out'][0]
check_type_and_dtype(x, 'x', Variable,
['float16', 'float32', 'float64'], op_type)
helper = LayerHelper(op_type, **locals())
output = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
return output
......
......@@ -19,7 +19,8 @@ from functools import partial, reduce
from . import nn
from .layer_function_generator import templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from ..framework import Variable, in_dygraph_mode
from .. import core
from ..data_feeder import check_type_and_dtype
from ..param_attr import ParamAttr
from ..initializer import NumpyArrayInitializer
......@@ -1213,6 +1214,21 @@ def softmax_with_cross_entropy(logits,
out = fluid.layers.softmax_with_cross_entropy(
logits=fc, label=label)
"""
attrs = {
'soft_label': soft_label,
'ignore_index': ignore_index,
'numeric_stable_mode': numeric_stable_mode,
'axis': axis
}
if in_dygraph_mode():
inputs = {'Logits': [logits], 'Label': [label]}
outs = core.ops.softmax_with_cross_entropy(inputs, attrs)
if not return_softmax:
return outs['Loss'][0]
else:
return outs['Loss'][0], outs['Softmax'][0]
helper = LayerHelper('softmax_with_cross_entropy', **locals())
softmax = helper.create_variable_for_type_inference(dtype=logits.dtype)
loss = helper.create_variable_for_type_inference(dtype=logits.dtype)
......@@ -1222,12 +1238,7 @@ def softmax_with_cross_entropy(logits,
'Label': label},
outputs={'Softmax': softmax,
'Loss': loss},
attrs={
'soft_label': soft_label,
'ignore_index': ignore_index,
'numeric_stable_mode': numeric_stable_mode,
'axis': axis
})
attrs=attrs)
if return_softmax:
return loss, softmax
......
......@@ -285,6 +285,7 @@ def monkey_patch_variable():
setattr(Variable, method_name,
_elemwise_method_creator_(method_name, op_type, reverse,
scalar_method))
# b = -a
Variable.__neg__ = _neg_
Variable.astype = astype
......@@ -24,7 +24,7 @@ import os
import inspect
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program
from ..dygraph import base
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
......@@ -186,6 +186,48 @@ __all__ = [
]
@dygraph_only
def _append_activation_in_dygraph(input,
act=None,
use_cudnn=False,
use_mkldnn=False):
"""Append activation in dygraph mode.
Args:
input: the input variable.
act: activation type
use_mkldnn: if use mkldnn
use_cudnn: if use cudnn
Return the Variable after append activation
"""
attrs = {'use_cudnn': use_cudnn, 'use_mkldnn': use_mkldnn}
inputs = {"X": [input]}
act_op = getattr(core.ops, act)
res = act_op(inputs, attrs)
return res['Out'][0]
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
attrs = {'axis': axis, 'use_mkldnn': use_mkldnn}
inputs = {'X': [x], 'Y': [y]}
op = getattr(core.ops, op_name)
outs = op(inputs, attrs)
pre_act = outs['Out'][0]
if not act:
return pre_act
else:
return _append_activation_in_dygraph(
pre_act, act, use_mkldnn=use_mkldnn)
def fc(input,
size,
num_flatten_dims=1,
......@@ -804,28 +846,41 @@ def dropout(x,
droped = fluid.layers.dropout(x, dropout_prob=0.5)
"""
def get_attrs(prog, dropout_prob, is_test, seed):
if (seed is None or seed == 0) and prog.random_seed != 0:
seed = prog.random_seed
attrs = {
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': dropout_implementation,
}
return attrs
if in_dygraph_mode():
attrs = get_attrs(default_main_program(), dropout_prob, is_test, seed)
attrs['is_test'] = not _dygraph_tracer()._train_mode
inputs = {'X': [x]}
outs = core.ops.dropout(inputs, attrs)
return outs['Out'][0]
helper = LayerHelper('dropout', **locals())
check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
'dropout')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
mask = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
seed = helper.main_program.random_seed
attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)
helper.append_op(
type='dropout',
inputs={'X': [x]},
outputs={'Out': [out],
'Mask': [mask]},
attrs={
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': dropout_implementation,
})
attrs=attrs)
return out
......@@ -3847,21 +3902,28 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]
"""
helper = LayerHelper('reduce_sum', **locals())
if dim is not None and not isinstance(dim, list):
dim = [dim]
attrs = {
'dim': dim if dim != None else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None else False
}
if in_dygraph_mode():
inputs = {'X': [input]}
outs = core.ops.reduce_sum(inputs, attrs)
return outs['Out'][0]
check_type_and_dtype(input, 'input', Variable,
['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None else False
})
attrs=attrs)
return out
......@@ -3914,22 +3976,30 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
"""
helper = LayerHelper('reduce_mean', **locals())
if dim is not None and not isinstance(dim, list):
dim = [dim]
attrs = {
'dim': dim if dim != None else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None else False
}
if in_dygraph_mode():
inputs = {'X': [input]}
outs = core.ops.reduce_mean(inputs, attrs)
return outs['Out'][0]
check_type_and_dtype(input, 'input', Variable,
['float32', 'float64', 'int32', 'int64'],
'reduce_mean')
helper = LayerHelper('reduce_mean', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
helper.append_op(
type='reduce_mean',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim if dim != None else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None else False
})
attrs=attrs)
return out
......@@ -4280,6 +4350,37 @@ def split(input, num_or_sections, dim=-1, name=None):
# x1.shape [3, 3, 5]
# x2.shape [3, 4, 5]
"""
if in_dygraph_mode():
inputs = {'X': [input]}
attrs = {}
if isinstance(dim, int):
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
else:
dim.stop_gradient = True
inputs['AxisTensor'] = [dim]
if isinstance(num_or_sections, int):
num = num_or_sections
attrs['num'] = num_or_sections
res = core.ops.split(inputs, attrs, {}, {'Out': num})
return res['Out']
elif isinstance(num_or_sections, list):
num = len(num_or_sections)
attrs['sections'] = list(
map(lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections))
contain_var = not all(not isinstance(ele, Variable)
for ele in num_or_sections)
if contain_var:
raise TypeError(
"The type of 'num_or_sections' in split must be int or list[int] in Dygraph mode, but "
"received %s." % ('list[Variable]'))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int or list in Dygraph mode, but "
"received %s." % (type(num_or_sections)))
if not isinstance(num_or_sections, (int, list, tuple)):
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or "
......@@ -4508,6 +4609,16 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
"""
attrs = {
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
}
if in_dygraph_mode():
inputs = {'X': [x], 'Y': [y]}
outs = core.ops.matmul(inputs, attrs)
return outs['Out'][0]
def __check_input(x, y):
var_names = {'x': x, 'y': y}
......@@ -4554,11 +4665,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
})
attrs=attrs)
return out
......@@ -4863,10 +4970,17 @@ def transpose(x, perm, name=None):
#(3L, 2L, 4L)
"""
if in_dygraph_mode():
attrs = {'axis': perm}
inputs = {'X': [x]}
outs = core.ops.transpose2(inputs, attrs)
return outs['Out'][0]
check_type_and_dtype(x, 'x', Variable,
['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
check_type(perm, 'perm', list, 'transpose')
if len(perm) != len(x.shape):
raise ValueError(
"Input(perm) is the permutation of dimensions of Input(x), "
......@@ -5458,14 +5572,41 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
# the shape of reshaped_2 is [5,10].
"""
if in_dygraph_mode():
#TODO(zhiqiu): open inplace if we can.
if inplace:
warnings.warn(
"Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
)
attrs = {}
if isinstance(shape, (list, tuple)):
contain_var = not all(not isinstance(ele, Variable)
for ele in shape)
if contain_var:
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
attrs['shape'] = shape
else:
raise TypeError(
"The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
inputs = {'X': [x]}
outs = core.ops.reshape2(inputs, attrs)
pre_act = outs['Out'][0]
if act is None:
return pre_act
else:
return _append_activation_in_dygraph(pre_act, act)
check_type_and_dtype(x, 'x', Variable,
['float16', 'float32', 'float64', 'int32', 'int64'],
'reshape')
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
helper = LayerHelper("reshape2", **locals())
inputs = {"X": x}
attrs = {}
def contain_var(one_list):
for ele in one_list:
......@@ -5513,26 +5654,23 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
(dim_idx, str(dim_size)))
return attrs_shape
if in_dygraph_mode():
inputs = {'X': x}
attrs = {'shape': shape}
else:
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = get_attr_shape(shape)
if contain_var(shape):
inputs['ShapeTensor'] = get_new_shape_tensor(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = x if inplace and not in_dygraph_mode(
) else helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = get_attr_shape(shape)
if contain_var(shape):
inputs['ShapeTensor'] = get_new_shape_tensor(shape)
elif isinstance(actual_shape, Variable):
actual_shape.stop_gradient = True
inputs["Shape"] = actual_shape
out = x if inplace else helper.create_variable_for_type_inference(
dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
......@@ -9736,6 +9874,45 @@ def slice(input, axes, starts, ends):
# sliced_2 is input[0:3, 0:2, 2:4].
"""
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
if in_dygraph_mode():
infer_flags = list(1 for i in range(len(axes)))
inputs = {'Input': [input]}
if isinstance(starts, (list, tuple)):
if contain_var(starts):
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
if isinstance(ends, (list, tuple)):
if contain_var(ends):
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s, which contains Variable." % type(shape))
else:
raise TypeError(
"The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
"received %s." % type(shape))
attrs = {
'axes': axes,
'starts': starts,
'ends': ends,
'infer_flags': infer_flags
}
outs = core.ops.slice(inputs, attrs)
return outs['Out'][0]
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple.")
......@@ -9745,12 +9922,6 @@ def slice(input, axes, starts, ends):
helper = LayerHelper('slice', **locals())
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
......@@ -9768,52 +9939,43 @@ def slice(input, axes, starts, ends):
attrs = {'axes': axes}
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
inputs = {'Input': input}
attrs = {
'axes': axes,
'starts': starts,
'ends': ends,
'infer_flags': infer_flags
}
else:
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if not contain_var(starts):
attrs['starts'] = starts
else:
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if not contain_var(ends):
attrs['ends'] = ends
else:
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
# infer_flags
attrs['infer_flags'] = infer_flags
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if not contain_var(starts):
attrs['starts'] = starts
else:
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if not contain_var(ends):
attrs['ends'] = ends
else:
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op(
......@@ -10310,6 +10472,10 @@ Examples:
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_add')
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
......@@ -10387,6 +10553,10 @@ Examples:
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_div')
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
......@@ -10464,6 +10634,10 @@ Examples:
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub')
return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
......@@ -10541,6 +10715,10 @@ Examples:
print(z_value) # z.shape=[2,3,4,5]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mul')
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
......@@ -10595,6 +10773,10 @@ Examples:
print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_max')
return _elementwise_op(LayerHelper('elementwise_max', **locals()))
......@@ -10677,7 +10859,9 @@ Examples:
print(z_value) #[2, 243, 16]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_pow')
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
......@@ -10707,6 +10891,10 @@ Examples:
print(z_value) #[1, 3, 3]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mod')
return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
......@@ -10736,6 +10924,10 @@ Examples:
print(z_value) #[3, 2, 1]
"""
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_floordiv')
return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
......
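
With the hunks above, the elementwise layers (and the arithmetic operators monkey-patched onto VarBase, e.g. the g * clip_scale rewrite in GradClipByGlobalNorm earlier in this diff) dispatch to _elementwise_op_in_dygraph and run eagerly. A small usage sketch under a dygraph guard:

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    from paddle.fluid.dygraph import to_variable

    with fluid.dygraph.guard():
        a = to_variable(np.ones([3, 3], dtype='float32'))
        b = to_variable(np.ones([3, 3], dtype='float32') * 2)
        # Goes through the in_dygraph_mode() branch added above.
        c = layers.elementwise_add(a, b)
        # Patched operator sugar routes through the same core.ops machinery.
        d = a * c
        print(c.numpy(), d.numpy())
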
......@@ -16,10 +16,11 @@ from __future__ import print_function
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
import numpy
......@@ -251,7 +252,16 @@ def concat(input, axis=0, name=None):
# [11 12 13]
# [14 15 16]]
"""
helper = LayerHelper('concat', **locals())
if in_dygraph_mode():
inputs = {'X': input}
if not isinstance(axis, int):
raise TypeError(
"Input 'axis' in concat must be int in Dygraph mode.")
attrs = {'axis': axis}
outs = core.ops.concat(inputs, attrs)
return outs['Out'][0]
if not isinstance(input, list):
warnings.warn(
"The type of input in concat should be list, but received %s." %
......@@ -270,6 +280,7 @@ def concat(input, axis=0, name=None):
else:
attrs['axis'] = axis
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
......
......@@ -31,6 +31,7 @@ from .layer_helper import LayerHelper
from .layers import ops
from .regularizer import append_regularization_ops
from .dygraph import base as imperative_base
from .dygraph import no_grad
from .dygraph.learning_rate_scheduler import LearningRateDecay
from paddle.fluid import core
from paddle.fluid.layers import tensor
......@@ -747,9 +748,20 @@ class SGDOptimizer(Optimizer):
name=name)
self.type = "sgd"
@no_grad
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
if framework.in_dygraph_mode():
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"LearningRate": [self._create_param_lr(param_and_grad)]
}
attrs = {}
outputs = {'ParamOut': [param_and_grad[0]]}
outs = core.ops.sgd(inputs, attrs, outputs)
return outs['ParamOut'][0]
assert isinstance(block, framework.Block)
# create the optimize op
sgd_op = block.append_op(
type=self.type,
......
......@@ -296,6 +296,7 @@ class TestImperative(unittest.TestCase):
var_inp = fluid.dygraph.base.to_variable(np_inp)
var_inp.stop_gradient = False
l = MyLayer("my_layer")
print(var_inp)
x = l(var_inp)[0]
self.assertIsNotNone(x)
dy_out = x.numpy()
......@@ -386,12 +387,14 @@ class TestImperative(unittest.TestCase):
self.assertEqual(len(sublayers), 2)
def test_dygraph_vs_static(self):
inp1 = np.random.rand(4, 3, 3)
inp2 = np.random.rand(4, 3, 3)
np_inp1 = np.random.rand(4, 3, 3)
np_inp2 = np.random.rand(4, 3, 3)
# dynamic graph
with fluid.dygraph.guard():
if np.sum(inp1) < np.sum(inp2):
inp1 = fluid.dygraph.to_variable(np_inp1)
inp2 = fluid.dygraph.to_variable(np_inp2)
if np.sum(np_inp1) < np.sum(np_inp2):
x = fluid.layers.elementwise_add(inp1, inp2)
else:
x = fluid.layers.elementwise_sub(inp1, inp2)
......@@ -429,8 +432,8 @@ class TestImperative(unittest.TestCase):
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
static_result = exe.run(fluid.default_main_program(),
feed={'inp1': inp1,
'inp2': inp2},
feed={'inp1': np_inp1,
'inp2': np_inp2},
fetch_list=out)[0]
self.assertTrue(np.allclose(dygraph_result, static_result))
......
......@@ -33,6 +33,7 @@ import paddle.fluid.layers as layers
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph import nn
from paddle.fluid.dygraph import base
from paddle.fluid.dygraph import to_variable
class LayerTest(unittest.TestCase):
......@@ -515,11 +516,11 @@ class TestLayer(LayerTest):
fetch_list=[ret])[0]
with self.dynamic_graph():
ret = layers.elementwise_add(n, n2)
ret = layers.elementwise_pow(ret, n3)
ret = layers.elementwise_div(ret, n4)
ret = layers.elementwise_sub(ret, n5)
dy_ret = layers.elementwise_mul(ret, n6)
ret = layers.elementwise_add(to_variable(n), to_variable(n2))
ret = layers.elementwise_pow(ret, to_variable(n3))
ret = layers.elementwise_div(ret, to_variable(n4))
ret = layers.elementwise_sub(ret, to_variable(n5))
dy_ret = layers.elementwise_mul(ret, to_variable(n6))
dy_ret_value = dy_ret.numpy()
self.assertTrue(np.allclose(static_ret, dy_ret_value))
......@@ -528,8 +529,8 @@ class TestLayer(LayerTest):
n2 = np.ones([3, 3], dtype='float32') * 2
with self.dynamic_graph():
min_ret = layers.elementwise_min(n, n2)
max_ret = layers.elementwise_max(n, n2)
min_ret = layers.elementwise_min(to_variable(n), to_variable(n2))
max_ret = layers.elementwise_max(to_variable(n), to_variable(n2))
min_ret_value = min_ret.numpy()
max_ret_value = max_ret.numpy()
......
......@@ -100,6 +100,7 @@ class TestVarBase(unittest.TestCase):
with fluid.dygraph.guard():
var = fluid.dygraph.to_variable(self.array)
self.assertTrue(np.array_equal(var[1, :].numpy(), self.array[1, :]))
self.assertTrue(np.array_equal(var[::-1].numpy(), self.array[::-1]))
def test_var_base_to_np(self):
with fluid.dygraph.guard():
......