Unverified commit 0b76c735, authored by Yu Yang, committed by GitHub

AddBiasOp does not care num_flatten_dims (#5200)

* AddBiasOp does not care num_flatten_dims

* Add comments
Parent 2e91c7da
@@ -142,8 +142,24 @@ class LayerHelper(object):
         return self.program.global_block().create_var(
             *args, persistable=False, **kwargs)
 
-    def append_bias_op(self, input_var):
-        size = list(input_var.shape[1:])
+    def append_bias_op(self, input_var, num_flatten_dims=None):
+        """
+        Append bias operator and return its output. If the user does not set
+        bias_attr, append_bias_op will return input_var.
+
+        :param input_var: the input variable. len(input_var.shape) must be
+        larger than or equal to 2.
+        :param num_flatten_dims: The input tensor will be flattened as a
+        matrix when adding bias.
+        `matrix.shape = product(input_var.shape[0:num_flatten_dims]), product(
+        input_var.shape[num_flatten_dims:])`
+        """
+        if num_flatten_dims is None:
+            num_flatten_dims = self.kwargs.get('num_flatten_dims', None)
+            if num_flatten_dims is None:
+                num_flatten_dims = 1
+
+        size = list(input_var.shape[num_flatten_dims:])
         bias_attr = self.bias_attr()
         if not bias_attr:
             return input_var
...
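For reference, the flattening rule described in the new docstring can be illustrated with a minimal standalone sketch. This is not Paddle API code; the helper name and the example shapes are hypothetical, chosen only to show how num_flatten_dims splits the input shape into matrix rows, matrix columns, and the resulting bias size.

    from functools import reduce
    import operator


    def flattened_shape_and_bias_size(shape, num_flatten_dims=1):
        # Fold the leading dims into the matrix rows and the trailing dims
        # into the matrix columns, as the docstring above describes.
        rows = reduce(operator.mul, shape[:num_flatten_dims], 1)
        cols = reduce(operator.mul, shape[num_flatten_dims:], 1)
        # The bias added by append_bias_op takes the shape of the trailing dims.
        bias_size = list(shape[num_flatten_dims:])
        return (rows, cols), bias_size


    print(flattened_shape_and_bias_size((2, 3, 4, 5), 1))  # ((2, 60), [3, 4, 5])
    print(flattened_shape_and_bias_size((2, 3, 4, 5), 2))  # ((6, 20), [4, 5])

With the default num_flatten_dims=1 the bias covers every dimension after the batch dimension, which matches the old behavior of `size = list(input_var.shape[1:])`; a larger value restricts the bias to the trailing dimensions only.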