Commit c7eb199b — authored by Yang Yu

Init commit

Parent: e5e206e2
# Imports for the ParallelDo unit tests (reconstructed from a merged
# two-column diff; this is the new side of the diff).
import unittest

import numpy

import paddle.v2.fluid as fluid
class BaseParallelForTest(unittest.TestCase):
    """Harness that builds a network from a generator callback and runs it.

    The callback protocol: the first ``yield`` hands the input variable(s)
    to the harness; the harness ``send``s back the (possibly per-device)
    input; the generator then builds the rest of the network and its second
    ``yield`` produces the loss variable.
    """

    def main(self, callback, feed, fetch):
        """Build the network on CPU without ParallelDo and print the result.

        Args:
            callback: argument-less generator function following the
                yield/send protocol described on the class.
            feed: feed dict passed to the executor.
            fetch: name(s) of the variable(s) to fetch.
        """
        cpu = fluid.CPUPlace()
        result_cpu = self._main_impl_(
            callback=callback,
            feed=feed,
            fetch=fetch,
            place=cpu,
            use_parallel=False)
        print(result_cpu)

    def _main_impl_(self, callback, feed, fetch, place, use_parallel=False):
        """Build the program, append backward on the mean loss, and run it.

        Args:
            callback: generator function, see class docstring.
            feed: feed dict for the executor.
            fetch: fetch list entry/entries for the executor.
            place: device to execute on.
            use_parallel: when True, wrap the network body in a ParallelDo
                block over all available places.

        Returns:
            The fetched results of one executor run.
        """
        main = fluid.Program()
        startup = fluid.Program()
        # Fix seed so serial and parallel runs are comparable.
        main.random_seed = 10
        startup.random_seed = 10

        with fluid.program_guard(main, startup):
            generator = callback()
            # Automatically insert parallel do if use_parallel = True
            if use_parallel:
                places = fluid.layers.get_places()
                pd = fluid.layers.ParallelDo(places)

                data = next(generator)
                if isinstance(data, fluid.Variable):
                    data = [data]

                with pd.do():
                    # list(...) keeps this valid on Python 3, where map()
                    # returns a lazy iterator without len().
                    ins = list(map(pd.read_input, data))
                    if len(ins) == 1:
                        ins = ins[0]
                    # send() resumes the generator AND returns the value of
                    # its next yield (the loss). Discarding that value and
                    # calling next() again would run past the final yield
                    # and raise StopIteration.
                    loss = generator.send(ins)  # patch input
                    pd.write_output(loss)

                loss = pd()
            else:
                data = next(generator)
                # As above: send() returns the loss yielded by the callback.
                loss = generator.send(data)

            avg_loss = fluid.layers.mean(x=loss)
            fluid.backward.append_backward(loss=avg_loss)

            exe = fluid.Executor(place)
            exe.run(startup)
            return exe.run(main, feed=feed, fetch_list=fetch)
class ParallelOpTest(BaseParallelForTest):
    """Smoke test: one fully-connected layer, fetching its weight gradient."""

    def test_simple_fc(self):
        def __network__():
            # First yield hands the data variable to the harness; the
            # harness sends back the (possibly per-device) input tensor.
            x = fluid.layers.data(shape=[784], dtype='float32', name='img')
            x = yield x
            hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            loss = fluid.layers.mean(x=hidden)
            yield loss

        self.main(
            callback=__network__,
            feed={
                'img': numpy.random.random(size=(128, 784)).astype('float32')
            },
            fetch='fc1.w@GRAD')
if __name__ == '__main__':
    # Standard unittest entry point (body truncated in the scraped diff;
    # reconstructed as the conventional unittest runner call).
    unittest.main()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册