Commit 9b7cd7f9 authored by yuyang18

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into feature/tensor_support_uint8

@@ -24,6 +24,6 @@ if(NOT WITH_FLUID_ONLY)
 endif()
 add_subdirectory(testing)
-if(NOT MOBILE_INFERENCE AND NOT RPI)
+if(NOT MOBILE_INFERENCE AND NOT RPI AND NOT WITH_C_API)
   add_subdirectory(fluid)
 endif()
@@ -33,7 +33,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/variant.h"
-#include "paddle/utils/Error.h"

 namespace paddle {
 namespace framework {
......
@@ -504,6 +504,7 @@ function main() {
       ;;
     capi)
       cmake_gen ${PYTHON_ABI:-""}
+      build
       gen_capi_package
       ;;
     fluid_inference_lib)
......
@@ -66,6 +66,7 @@ list(REMOVE_ITEM TEST_OPS test_fetch_var)
 list(REMOVE_ITEM TEST_OPS test_parallel_op)
 list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input)
 list(REMOVE_ITEM TEST_OPS test_dist_train)
+list(REMOVE_ITEM TEST_OPS test_network_with_dtype)

 # tests that can be bundled together in one python process for speed.
 if(WITH_FAST_BUNDLE_TEST)
@@ -83,6 +84,7 @@ py_test_modules(test_parallel_executor MODULES test_parallel_executor)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR})
 py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn)
 py_test_modules(test_mul_op MODULES test_mul_op)
+py_test_modules(test_network_with_dtype MODULES test_network_with_dtype)

 # tests that need to be run in separate process.
 py_test_modules(test_multihead_attention MODULES test_multihead_attention)
......
@@ -24,33 +24,30 @@ BATCH_SIZE = 20

 class TestNetWithDtype(unittest.TestCase):
-    def set_network(self):
+    def setUp(self):
         self.dtype = "float64"
         self.init_dtype()
-        main = fluid.Program()
-        with fluid.program_guard(main):
-            self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
-            self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
-            y_predict = fluid.layers.fc(input=self.x, size=1, act=None)
-            cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
+
+    def run_net_on_place(self, place):
+        main = fluid.Program()
+        startup = fluid.Program()
+        with fluid.program_guard(main, startup):
+            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
             avg_cost = fluid.layers.mean(cost)
-            self.program = main
-            self.fetch_list = [avg_cost]
             sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
             sgd_optimizer.minimize(avg_cost)

-    def run_net_on_place(self, place):
+        fetch_list = [avg_cost]
         train_reader = paddle.batch(
             paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
-        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
+        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
         exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
+        exe.run(startup)
         for data in train_reader():
-            exe.run(self.program,
-                    feed=feeder.feed(data),
-                    fetch_list=self.fetch_list)
+            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
             # the main program is runable, the datatype is fully supported
             break
@@ -58,14 +55,12 @@ class TestNetWithDtype(unittest.TestCase):
         pass

     def test_cpu(self):
-        self.set_network()
         place = fluid.CPUPlace()
         self.run_net_on_place(place)

     def test_gpu(self):
         if not core.is_compiled_with_cuda():
             return
-        self.set_network()
         place = fluid.CUDAPlace(0)
         self.run_net_on_place(place)
......
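Note on the refactor above: each test now builds its own main/startup programs inside run_net_on_place, while setUp only fixes the data type through the init_dtype hook (a no-op by default, see the bare pass in the hunk). A minimal sketch of how a dtype-specific case could reuse the whole test by overriding that hook; the subclass name and the float32 choice are illustrative only and are not part of this commit:

# Illustrative sketch, not part of the commit above; assumed to live in the
# same test module as TestNetWithDtype. setUp() defaults self.dtype to
# "float64" and then calls init_dtype(), so a subclass only overrides the hook.
class TestFloat32NetWithDtype(TestNetWithDtype):
    def init_dtype(self):
        self.dtype = "float32"

With that one override, test_cpu and test_gpu run the same uci_housing training loop with the alternative data type.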