未验证 提交 310f4320 编写于 作者: 201716010711 提交者: GitHub

clean fluid task: delete sum api (#48438)

上级 47e7b7a5
...@@ -104,7 +104,6 @@ __all__ = [ ...@@ -104,7 +104,6 @@ __all__ = [
'elementwise_mul', 'elementwise_mul',
'gaussian_random', 'gaussian_random',
'sampling_id', 'sampling_id',
'sum',
'shape', 'shape',
'clip', 'clip',
'clip_by_norm', 'clip_by_norm',
...@@ -5439,78 +5438,6 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'): ...@@ -5439,78 +5438,6 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
return out return out
@templatedoc()
def sum(x):
    """
    ${comment}

    Sums the input tensor(s) element-wise. This is a thin backward-compatibility
    wrapper: the actual work is delegated to ``paddle.add_n`` below.

    Case 1:
    ::

        Input:
            Input. Shape = [2, 3]
            Input = [[1, 2, 3],
                     [4, 5, 6]]

        Output:
            The output. Shape = [2, 3]
            Output = [[1, 2, 3],
                      [4, 5, 6]]

    Case 2:
    ::

        Input:
            First input:
            Input1. Shape = [2, 3]
            Input1 = [[1, 2, 3],
                      [4, 5, 6]]

            The second input:
            Input2. Shape = [2, 3]
            Input2 = [[7, 8, 9],
                      [10, 11, 12]]

        Output:
            The output. Shape = [2, 3]
            Output = [[8, 10, 12],
                      [14, 16, 18]]

    Args:
        x (Variable|list(Variable)): ${x_comment}

    Returns:
        Variable: ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
            input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
            sum = fluid.layers.sum([input0, input1])

            # You can print out 'sum' via executor.
            out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_main_program())

            # The printed result is:
            # 1570701754    the sum of input0 and input1:   The place is:CPUPlace
            # Tensor[sum_0.tmp_0]
            #    shape: [2,3,]
            #    dtype: l
            #    data: 8,8,8,8,8,8,

            # the sum of input0 and input1 is 2-D Tensor with shape [2,3].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They both represent 64-bit integer variables.
    """
    # Delegate directly to the public add_n API; no extra validation is done here.
    return paddle.add_n(x)
def shape(input): def shape(input):
""" """
:alias_main: paddle.shape :alias_main: paddle.shape
......
...@@ -3980,8 +3980,8 @@ class ModelAverage(Optimizer): ...@@ -3980,8 +3980,8 @@ class ModelAverage(Optimizer):
# backup param value to grad # backup param value to grad
layers.assign(input=param, output=grad) layers.assign(input=param, output=grad)
# param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) tmp = paddle.add_n([num_accumulates, old_num_accumulates])
sum = layers.sum(x=[sum_1, sum_2, sum_3]) sum = paddle.add_n([sum_1, sum_2, sum_3])
tmp = layers.cast( tmp = layers.cast(
x=tmp, dtype='float32' if self._dtype is None else self._dtype x=tmp, dtype='float32' if self._dtype is None else self._dtype
) )
......
...@@ -51,7 +51,7 @@ class TestBase(IPUOpTest): ...@@ -51,7 +51,7 @@ class TestBase(IPUOpTest):
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
) )
out = paddle.fluid.layers.sum([x, y], **self.attrs) out = paddle.add_n([x, y], **self.attrs)
self.fetch_list = [out.name] self.fetch_list = [out.name]
def run_model(self, exec_mode): def run_model(self, exec_mode):
...@@ -92,7 +92,7 @@ class TestCase1(TestBase): ...@@ -92,7 +92,7 @@ class TestCase1(TestBase):
z = paddle.static.data( z = paddle.static.data(
name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32' name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32'
) )
out = paddle.fluid.layers.sum([x, y, z], **self.attrs) out = paddle.add_n([x, y, z], **self.attrs)
self.fetch_list = [out.name] self.fetch_list = [out.name]
......
...@@ -165,13 +165,13 @@ class FusionGroupPassSumTest(FusionGroupPassTest): ...@@ -165,13 +165,13 @@ class FusionGroupPassSumTest(FusionGroupPassTest):
) )
# subgraph with 2 op nodes # subgraph with 2 op nodes
tmp_0 = layers.sum( tmp_0 = paddle.add_n(
[self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]] [self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]]
) )
tmp_1 = paddle.sqrt(tmp_0) tmp_1 = paddle.sqrt(tmp_0)
tmp_2 = layers.mul(tmp_0, self.feed_vars[3]) tmp_2 = layers.mul(tmp_0, self.feed_vars[3])
# subgraph with 2 op nodes # subgraph with 2 op nodes
tmp_3 = paddle.square(layers.sum([tmp_1, tmp_2])) tmp_3 = paddle.square(paddle.add_n([tmp_1, tmp_2]))
self.append_gradients(tmp_3) self.append_gradients(tmp_3)
......
...@@ -3534,7 +3534,7 @@ class TestBook(LayerTest): ...@@ -3534,7 +3534,7 @@ class TestBook(LayerTest):
name="input", shape=[13, 11], dtype='float32' name="input", shape=[13, 11], dtype='float32'
) )
out = layers.sum(input) out = paddle.add_n(input)
return out return out
def make_slice(self): def make_slice(self):
......
...@@ -122,7 +122,7 @@ class SimpleNetWithCond: ...@@ -122,7 +122,7 @@ class SimpleNetWithCond:
cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32')) cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32'))
sum_cond = fluid.layers.cond(cond_i > 1.0, cond_true, cond_false) sum_cond = fluid.layers.cond(cond_i > 1.0, cond_true, cond_false)
sum_all = fluid.layers.sum([sum_xy, sub_yz, sum_cond]) sum_all = paddle.add_n([sum_xy, sub_yz, sum_cond])
mean_out = paddle.mean(sum_all) mean_out = paddle.mean(sum_all)
if use_bf16: if use_bf16:
import paddle.static.amp as amp import paddle.static.amp as amp
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
class TestModelAverage(unittest.TestCase):
    """Static-graph regression test for ``paddle.fluid.optimizer.ModelAverage``.

    Trains a tiny fc network for 10 steps, inspects the ModelAverage
    accumulator variables by name, then checks that applying the averaged
    parameters (and restoring them) yields the expected fc bias values.
    """

    def test_model_average_static(self):
        # Run everything in static-graph mode; ModelAverage here is the
        # fluid (static) implementation.
        paddle.enable_static()
        place = fluid.CPUPlace()
        # NOTE(review): `shape` is never used below — looks like leftover
        # from an earlier version of this test.
        shape = [2, 3, 8, 8]
        exe = fluid.Executor(place)
        train_program = fluid.Program()
        startup = fluid.Program()
        test_program = fluid.Program()
        with fluid.program_guard(train_program, startup):
            with fluid.unique_name.guard():
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = paddle.mean(hidden)
                # Clone BEFORE adding optimizer ops so the test program
                # contains only the forward graph.
                test_program = train_program.clone()
                optimizer = paddle.optimizer.Momentum(
                    learning_rate=0.2, momentum=0.1
                )
                optimizer.minimize(loss)
                # build ModelAverage optimizer (adds the accumulator vars
                # fetched by name below)
                model_average = paddle.fluid.optimizer.ModelAverage(
                    0.15, min_average_window=2, max_average_window=10
                )
        exe.run(startup)
        for i in range(10):
            x = np.random.random(size=(10, 1)).astype('float32')
            # Fetch the fc bias plus the ModelAverage accumulators that
            # track it; the names follow the "<param>_<accumulator>_0"
            # convention used by the optimizer.
            (
                latest_b,
                sum_1,
                sum_2,
                sum_3,
                num_accumulates,
                old_num_accumulates,
                num_updates,
            ) = exe.run(
                program=train_program,
                feed={'X': x},
                fetch_list=[
                    'fc_0.b_0',
                    'fc_0.b_0_sum_1_0',
                    'fc_0.b_0_sum_2_0',
                    'fc_0.b_0_sum_3_0',
                    'fc_0.b_0_num_accumulates_0',
                    'fc_0.b_0_old_num_accumulates_0',
                    'fc_0.b_0_num_updates_0',
                ],
            )
        # After 10 updates with max_average_window=10, the expectation is
        # that sum_1/sum_2 have been rolled over (zeroed), two accumulations
        # moved to the "old" counter, and num_updates counts every step.
        # NOTE(review): these expected values presumably follow from the
        # ModelAverage window-rollover rule — confirm against the optimizer
        # implementation if this test starts failing.
        self.assertTrue(
            np.equal(sum_1, np.zeros(shape=[10], dtype='float32')).all()
        )
        self.assertTrue(
            np.equal(sum_2, np.zeros(shape=[10], dtype='float32')).all()
        )
        self.assertTrue(
            np.equal(num_accumulates, np.array([0], dtype='int64')).all()
        )
        self.assertTrue(
            np.equal(old_num_accumulates, np.array([2], dtype='int64')).all()
        )
        self.assertTrue(
            np.equal(num_updates, np.array([10], dtype='int64')).all()
        )

        # Reference value: averaged bias = total sum / total accumulation
        # count, mirroring the formula inside ModelAverage.
        average_b = (sum_1 + sum_2 + sum_3) / (
            num_accumulates + old_num_accumulates
        )
        # apply ModelAverage: inside the context the parameter should hold
        # the averaged value ...
        with model_average.apply(exe):
            x = np.random.random(size=(10, 1)).astype('float32')
            outs, b = exe.run(
                program=test_program,
                feed={'X': x},
                fetch_list=[loss.name, 'fc_0.b_0'],
            )
            self.assertAlmostEqual(np.mean(average_b), np.mean(b))

        # ... and after leaving the context the original (latest) parameter
        # value must be restored.
        x = np.random.random(size=(10, 1)).astype('float32')
        outs, b = exe.run(
            program=test_program,
            feed={'X': x},
            fetch_list=[loss.name, 'fc_0.b_0'],
        )
        self.assertAlmostEqual(np.mean(latest_b), np.mean(b))
# Standard unittest entry point: run all TestModelAverage cases when the
# file is executed directly.
if __name__ == "__main__":
    unittest.main()
...@@ -426,20 +426,20 @@ class API_Test_Add_n(unittest.TestCase): ...@@ -426,20 +426,20 @@ class API_Test_Add_n(unittest.TestCase):
class TestRaiseSumError(unittest.TestCase): class TestRaiseSumError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_type(): def test_type():
fluid.layers.sum([11, 22]) paddle.add_n([11, 22])
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
def test_dtype(): def test_dtype():
data1 = fluid.data(name="input1", shape=[10], dtype="int8") data1 = fluid.data(name="input1", shape=[10], dtype="int8")
data2 = fluid.data(name="input2", shape=[10], dtype="int8") data2 = fluid.data(name="input2", shape=[10], dtype="int8")
fluid.layers.sum([data1, data2]) paddle.add_n([data1, data2])
self.assertRaises(TypeError, test_dtype) self.assertRaises(TypeError, test_dtype)
def test_dtype1(): def test_dtype1():
data1 = fluid.data(name="input1", shape=[10], dtype="int8") data1 = fluid.data(name="input1", shape=[10], dtype="int8")
fluid.layers.sum(data1) paddle.add_n(data1)
self.assertRaises(TypeError, test_dtype1) self.assertRaises(TypeError, test_dtype1)
......
...@@ -130,20 +130,20 @@ class API_Test_Add_n(unittest.TestCase): ...@@ -130,20 +130,20 @@ class API_Test_Add_n(unittest.TestCase):
class TestRaiseSumError(unittest.TestCase): class TestRaiseSumError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_type(): def test_type():
fluid.layers.sum([11, 22]) paddle.add_n([11, 22])
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
def test_dtype(): def test_dtype():
data1 = fluid.data(name="input1", shape=[10], dtype="int8") data1 = fluid.data(name="input1", shape=[10], dtype="int8")
data2 = fluid.data(name="input2", shape=[10], dtype="int8") data2 = fluid.data(name="input2", shape=[10], dtype="int8")
fluid.layers.sum([data1, data2]) paddle.add_n([data1, data2])
self.assertRaises(TypeError, test_dtype) self.assertRaises(TypeError, test_dtype)
def test_dtype1(): def test_dtype1():
data1 = fluid.data(name="input1", shape=[10], dtype="int8") data1 = fluid.data(name="input1", shape=[10], dtype="int8")
fluid.layers.sum(data1) paddle.add_n(data1)
self.assertRaises(TypeError, test_dtype1) self.assertRaises(TypeError, test_dtype1)
......
...@@ -637,7 +637,7 @@ class StaticGraphAdapter: ...@@ -637,7 +637,7 @@ class StaticGraphAdapter:
metrics.append(to_list(metric.compute(*(outputs + labels)))) metrics.append(to_list(metric.compute(*(outputs + labels))))
if mode == 'train' and self.model._optimizer: if mode == 'train' and self.model._optimizer:
self._loss_endpoint = fluid.layers.sum(losses) self._loss_endpoint = paddle.add_n(losses)
if self._nranks > 1: if self._nranks > 1:
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -795,7 +795,7 @@ class DynamicGraphAdapter: ...@@ -795,7 +795,7 @@ class DynamicGraphAdapter:
losses = self.model._loss(*(to_list(outputs) + labels)) losses = self.model._loss(*(to_list(outputs) + labels))
losses = to_list(losses) losses = to_list(losses)
final_loss = fluid.layers.sum(losses) final_loss = paddle.add_n(losses)
if self._amp_level != "O0": if self._amp_level != "O0":
scaled = self.model._scaler.scale(final_loss) scaled = self.model._scaler.scale(final_loss)
......
...@@ -548,8 +548,8 @@ class ModelAverage(Optimizer): ...@@ -548,8 +548,8 @@ class ModelAverage(Optimizer):
# backup param value to grad # backup param value to grad
layers.assign(input=param, output=grad) layers.assign(input=param, output=grad)
# param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) tmp = paddle.add_n([num_accumulates, old_num_accumulates])
sum = layers.sum(x=[sum_1, sum_2, sum_3]) sum = paddle.add_n([sum_1, sum_2, sum_3])
tmp = layers.cast( tmp = layers.cast(
x=tmp, dtype='float32' if self._dtype is None else self._dtype x=tmp, dtype='float32' if self._dtype is None else self._dtype
) )
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册