Unverified · Commit cb1a2512 · authored by: G guofei · committed by: GitHub

Make the place parameter default (#23094)

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop

* Make the place parameter default

test=develop
Parent commit: d085f792
...@@ -455,12 +455,14 @@ handler = FetchHandlerExample(var_dict=var_dict) ...@@ -455,12 +455,14 @@ handler = FetchHandlerExample(var_dict=var_dict)
class Executor(object): class Executor(object):
""" """
An Executor in Python, supports single/multiple-GPU running, An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running. When construction the Executor, and single/multiple-CPU running.
the device is required.
Args: Args:
place(fluid.CPUPlace()|fluid.CUDAPlace(n)): This parameter represents place(fluid.CPUPlace()|fluid.CUDAPlace(n)|None): This parameter represents
the executor run on which device. which device the executor runs on. When this parameter is None, PaddlePaddle
will set the default device according to its installation version. If Paddle
is CPU version, the default device would be set to `CPUPlace()` . If Paddle is
GPU version, the default device would be set to `CUDAPlace(0)` . Default is None.
Returns: Returns:
Executor Executor
...@@ -473,9 +475,13 @@ class Executor(object): ...@@ -473,9 +475,13 @@ class Executor(object):
import numpy import numpy
import os import os
use_cuda = True # Set place explicitly.
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() # use_cuda = True
exe = fluid.Executor(place) # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# exe = fluid.Executor(place)
# If you don't set place, PaddlePaddle sets the default device.
exe = fluid.Executor()
train_program = fluid.Program() train_program = fluid.Program()
startup_program = fluid.Program() startup_program = fluid.Program()
...@@ -498,14 +504,19 @@ class Executor(object): ...@@ -498,14 +504,19 @@ class Executor(object):
# Or, compiled the program and run. See `CompiledProgram` # Or, compiled the program and run. See `CompiledProgram`
# for more detail. # for more detail.
# NOTE: If you use CPU to run the program, you need # NOTE: If you use CPU to run the program or Paddle is
# to specify the CPU_NUM, otherwise, fluid will use # CPU version, you need to specify the CPU_NUM, otherwise,
# all the number of the logic core as the CPU_NUM, # fluid will use all the number of the logic core as
# in that case, the batch size of the input should be # the CPU_NUM, in that case, the batch size of the input
# greater than CPU_NUM, if not, the process will be # should be greater than CPU_NUM, if not, the process will be
# failed by an exception. # failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2) # Set place explicitly.
# if not use_cuda:
# os.environ['CPU_NUM'] = str(2)
# If you don't set place and PaddlePaddle is CPU version
# os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram( compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel( train_program).with_data_parallel(
...@@ -515,7 +526,13 @@ class Executor(object): ...@@ -515,7 +526,13 @@ class Executor(object):
fetch_list=[loss.name]) fetch_list=[loss.name])
""" """
def __init__(self, place): def __init__(self, place=None):
if place is None:
if core.is_compiled_with_cuda():
self.place = core.CUDAPlace(0)
else:
self.place = core.CPUPlace()
else:
self.place = place self.place = place
self.program_caches = dict() self.program_caches = dict()
self.ctx_caches = dict() self.ctx_caches = dict()
......
...@@ -31,10 +31,9 @@ class TestExecutor(unittest.TestCase): ...@@ -31,10 +31,9 @@ class TestExecutor(unittest.TestCase):
dtype='float32', dtype='float32',
append_batch_size=False) append_batch_size=False)
out = mul(x=a, y=b) out = mul(x=a, y=b)
place = core.CPUPlace()
a_np = numpy.random.random((100, 784)).astype('float32') a_np = numpy.random.random((100, 784)).astype('float32')
b_np = numpy.random.random((784, 100)).astype('float32') b_np = numpy.random.random((784, 100)).astype('float32')
exe = Executor(place) exe = Executor()
outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out]) outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out])
out = outs[0] out = outs[0]
self.assertEqual((100, 100), out.shape) self.assertEqual((100, 100), out.shape)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.