diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index a4eb6f706edab9479cbce436311eb96da8845646..2f480e00c100d579e100de17d3feb957f5ef6167 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -33,7 +33,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/variant.h"
-#include "paddle/utils/Error.h"
 
 namespace paddle {
 namespace framework {
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index d9190408e151283ece8460286dd67818dd39da3e..496acc57912aacdf76f21aa88efb071d49aa0ce9 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -66,6 +66,7 @@ list(REMOVE_ITEM TEST_OPS test_fetch_var)
 list(REMOVE_ITEM TEST_OPS test_parallel_op)
 list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input)
 list(REMOVE_ITEM TEST_OPS test_dist_train)
+list(REMOVE_ITEM TEST_OPS test_network_with_dtype)
 
 # tests that can be bundled together in one python process for speed.
 if(WITH_FAST_BUNDLE_TEST)
@@ -83,6 +84,7 @@ py_test_modules(test_parallel_executor MODULES test_parallel_executor)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR})
 py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn)
 py_test_modules(test_mul_op MODULES test_mul_op)
+py_test_modules(test_network_with_dtype MODULES test_network_with_dtype)
 
 # tests that need to be run in separate process.
 py_test_modules(test_multihead_attention MODULES test_multihead_attention)
diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
index fe8aceb3ae42f73590bffe2a372c771654a372a9..d4835dd18405fc7a0d508a780a734922e0abd12c 100644
--- a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
+++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
@@ -24,33 +24,30 @@ BATCH_SIZE = 20
 
 
 class TestNetWithDtype(unittest.TestCase):
-    def set_network(self):
+    def setUp(self):
         self.dtype = "float64"
         self.init_dtype()
 
-        main = fluid.Program()
-        with fluid.program_guard(main):
-            self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
-            self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
-            y_predict = fluid.layers.fc(input=self.x, size=1, act=None)
-            cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
+    def run_net_on_place(self, place):
+        main = fluid.Program()
+        startup = fluid.Program()
+        with fluid.program_guard(main, startup):
+            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
             avg_cost = fluid.layers.mean(cost)
-            self.program = main
-            self.fetch_list = [avg_cost]
+            sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+            sgd_optimizer.minimize(avg_cost)
 
-        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
-        sgd_optimizer.minimize(avg_cost)
-
-    def run_net_on_place(self, place):
+        fetch_list = [avg_cost]
         train_reader = paddle.batch(
             paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
-        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
+        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
         exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
+        exe.run(startup)
         for data in train_reader():
-            exe.run(self.program,
-                    feed=feeder.feed(data),
-                    fetch_list=self.fetch_list)
+            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
             # the main program is runable, the datatype is fully supported
             break
 
@@ -58,14 +55,12 @@ class TestNetWithDtype(unittest.TestCase):
         pass
 
     def test_cpu(self):
-        self.set_network()
         place = fluid.CPUPlace()
         self.run_net_on_place(place)
 
     def test_gpu(self):
         if not core.is_compiled_with_cuda():
             return
-        self.set_network()
         place = fluid.CUDAPlace(0)
         self.run_net_on_place(place)
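Note (not part of the patch): the refactored test keeps the init_dtype() hook, so additional datatypes can be exercised by subclassing. Below is a minimal sketch of such a subclass; the class name TestFloat32NetWithDtype and the choice of "float32" are illustrative assumptions, not something the patch adds.

# Illustrative sketch only -- not part of the patch.
# Assumes it runs from python/paddle/fluid/tests/unittests so that the
# test module is importable. Overrides init_dtype() to check another dtype.
import unittest

from test_network_with_dtype import TestNetWithDtype


class TestFloat32NetWithDtype(TestNetWithDtype):
    def init_dtype(self):
        # Override the default "float64" set in setUp().
        self.dtype = "float32"


if __name__ == '__main__':
    unittest.main()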