Commit c7eb199b authored by Yang Yu

Init commit

Parent e5e206e2
import unittest import unittest
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid as fluid import paddle.v2.fluid as fluid
from paddle.v2.fluid.framework import Program import numpy
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward
class BaseParallelForTest(unittest.TestCase):
    """Shared harness for ParallelDo tests.

    A test describes its network as a coroutine-style generator function:
    the first ``yield`` hands the input variable(s) to the harness, the
    harness ``send``s back the (possibly ParallelDo-patched) inputs, and
    the second ``yield`` hands back the loss variable.
    """

    def main(self, callback, feed, fetch):
        """Build `callback`'s network, run it serially on CPU, print results.

        Args:
            callback: generator function describing the network (see class
                docstring for the two-yield protocol).
            feed: feed dict mapping variable names to numpy arrays.
            fetch: fetch target(s) passed to the executor's ``fetch_list``.
        """
        cpu = fluid.CPUPlace()
        result_cpu = self._main_impl_(
            callback=callback,
            feed=feed,
            fetch=fetch,
            place=cpu,
            use_parallel=False)
        # print() form is valid in both Python 2 and Python 3.
        print(result_cpu)

    def _main_impl_(self, callback, feed, fetch, place, use_parallel=False):
        """Build and execute the network described by `callback`.

        When ``use_parallel`` is True the network body is wrapped in a
        ``fluid.layers.ParallelDo`` over all available places; otherwise it
        is built as-is. Returns the executor's fetch results.
        """
        main = fluid.Program()
        startup = fluid.Program()
        # Fix seeds so parallel and serial runs are comparable.
        main.random_seed = 10
        startup.random_seed = 10

        with fluid.program_guard(main, startup):
            generator = callback()
            # Automatically insert ParallelDo if use_parallel is True.
            if use_parallel:
                places = fluid.layers.get_places()
                pd = fluid.layers.ParallelDo(places)
                data = next(generator)

                # The callback may yield a single Variable or a list of them;
                # normalize to a list for read_input.
                if isinstance(data, fluid.Variable):
                    data = [data]

                with pd.do():
                    # list(...) keeps len() working under Python 3, where
                    # map() returns an iterator.
                    ins = list(map(pd.read_input, data))
                    if len(ins) == 1:
                        ins = ins[0]
                    # send() resumes the generator with the patched input and
                    # returns the value of the next yield (the loss). A
                    # separate next() afterwards would run the two-yield
                    # generator off its end and raise StopIteration.
                    loss = generator.send(ins)  # patch input
                    pd.write_output(loss)
                loss = pd()
            else:
                data = next(generator)
                # As above: send() both feeds the input back in and returns
                # the yielded loss.
                loss = generator.send(data)

            avg_loss = fluid.layers.mean(x=loss)
            fluid.backward.append_backward(loss=avg_loss)

        exe = fluid.Executor(place)
        exe.run(startup)
        return exe.run(main, feed=feed, fetch_list=fetch)
class ParallelOpTest(BaseParallelForTest):
    """Smoke test: a single fully-connected layer run through the harness."""

    def test_simple_fc(self):
        """Feed random images through one FC layer and fetch fc1.w's gradient."""

        def __network__():
            # First yield hands the input variable to the harness; the value
            # sent back (x) is the possibly ParallelDo-patched input. Second
            # yield hands back the loss.
            x = fluid.layers.data(shape=[784], dtype='float32', name='img')
            x = yield x
            hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            loss = fluid.layers.mean(x=hidden)
            yield loss

        self.main(
            callback=__network__,
            feed={
                'img': numpy.random.random(size=(128, 784)).astype('float32')
            },
            fetch='fc1.w@GRAD')
if __name__ == '__main__': if __name__ == '__main__':
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register