Commit f4a0e684 authored by baojun, committed by Tao Luo

Fix ngraph compile WITH_DISTRIBUTE=ON (#15636)

* fix compile issue with_distribute test=develop

* simplified logic test=develop

* use ngraph dependency test=develop

* set cpu only test=develop

* update test and eliminate fp16 test test=develop
Parent 2fbfebc5
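The recurring edit across the Python tests below is easiest to read in isolation: each ngraph test subclasses an existing operator test and sets OpTest's `_cpu_only` flag in `setUp`, which is intended to keep the operator checks on CPU places only. A minimal sketch of that pattern, assuming a Paddle source tree where the fluid unittest package is importable (the class name here is illustrative, not part of this commit):

```python
# Sketch of the CPU-only test pattern applied throughout this commit:
# reuse an existing operator test and pin it to CPU via OpTest's _cpu_only flag.
import unittest

from paddle.fluid.tests.unittests.test_mean_op import TestMeanOp


class TestNGRAPHMeanOpCPUOnly(TestMeanOp):
    def setUp(self):
        super(TestNGRAPHMeanOpCPUOnly, self).setUp()
        # With this flag set, OpTest is expected to skip CUDA places and run
        # the operator check on CPUPlace only, matching the ngraph engine.
        self._cpu_only = True


if __name__ == '__main__':
    unittest.main()
```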
@@ -158,18 +158,19 @@ cc_library(variable_helper SRCS variable_helper.cc DEPS lod_tensor)
 cc_library(naive_executor SRCS naive_executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper)
 
+if(WITH_NGRAPH)
+  set(NGRAPH_EXE_DEPS ngraph_engine)
+else()
+  set(NGRAPH_EXE_DEPS)
+endif()
+
 if(WITH_DISTRIBUTE)
   cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog
-    lod_rank_table feed_fetch_method sendrecvop_rpc ${GLOB_DISTRIBUTE_DEPS} graph_to_program_pass variable_helper)
+    lod_rank_table feed_fetch_method sendrecvop_rpc ${GLOB_DISTRIBUTE_DEPS} graph_to_program_pass variable_helper ${NGRAPH_EXE_DEPS})
   set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
   set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
 else()
-  if (WITH_NGRAPH)
-    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper ngraph_engine)
-  else ()
-    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper)
-  endif()
+  cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper ${NGRAPH_EXE_DEPS})
   cc_test(test_naive_executor SRCS naive_executor_test.cc DEPS naive_executor elementwise_add_op)
 endif()
......
@@ -35,7 +35,7 @@ class NgraphEngineOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     framework::OpKernelType kt = framework::OpKernelType(
-        framework::proto::VarType::FP32, ctx.GetPlace());
+        framework::proto::VarType::FP32, platform::CPUPlace());
     return kt;
   }
 };
......
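The kernel-type change above makes the ngraph engine op always report FP32 on `platform::CPUPlace()`, regardless of the place in the execution context, so subgraphs handed to ngraph execute on CPU. A rough caller-side sketch under that constraint, assuming a WITH_NGRAPH=ON build of the 1.x fluid API (the layer and executor calls are standard fluid usage, not taken from this diff):

```python
# Rough sketch: run a small fluid program on CPUPlace, which is where the
# ngraph engine kernel is pinned after this change. Enabling the ngraph
# engine itself is assumed to be controlled by the build flag and a runtime
# use_ngraph flag whose exact name may vary by Paddle version.
import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=2)

place = fluid.CPUPlace()  # ngraph-backed subgraphs run on CPU only
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

out, = exe.run(feed={'x': np.random.rand(1, 4).astype('float32')},
               fetch_list=[y])
print(out.shape)
```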
@@ -16,14 +16,37 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_accuracy_op import TestAccuracyOp
 
 
-class TestNGRAPHAccuracyOp(TestAccuracyOp):
+class TestNGRAPHAccuracyOp(OpTest):
     def setUp(self):
-        super(TestNGRAPHAccuracyOp, self).setUp()
+        self.op_type = "accuracy"
+        self.dtype = np.float32
+        self.init_dtype()
+        n = 128
+        infer = np.random.random((n, 1)).astype(self.dtype)
+        indices = np.random.randint(0, 2, (n, 1))
+        label = np.random.randint(0, 2, (n, 1))
+        self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
+        num_correct = 0
+        for rowid in range(n):
+            for ele in indices[rowid]:
+                if ele == label[rowid]:
+                    num_correct += 1
+                    break
+        self.outputs = {
+            'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
+            'Correct': np.array([num_correct]).astype("int64"),
+            'Total': np.array([n]).astype("int64")
+        }
+        self._cpu_only = True
+
+    def init_dtype(self):
+        pass
+
+    def test_check_output(self):
+        self.check_output()
 
 
 if __name__ == '__main__':
......
@@ -15,35 +15,59 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_conv2d_op import *
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1
 
 
 class TestNGRAPH(TestConv2dOp):
+    def setUp(self):
+        super(TestNGRAPH, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPH, self).init_kernel_type()
 
 
 class TestNGRAPHWithPad(TestWithPad):
+    def setUp(self):
+        super(TestNGRAPHWithPad, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPHWithPad, self).init_kernel_type()
 
 
 class TestNGRAPHWithStride(TestWithStride):
+    def setUp(self):
+        super(TestNGRAPHWithStride, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPHWithStride, self).init_kernel_type()
 
 
 class TestNGRAPHWithGroup(TestWithGroup):
+    def setUp(self):
+        super(TestNGRAPHWithGroup, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPHWithGroup, self).init_kernel_type()
 
 
 class TestNGRAPHWith1x1(TestWith1x1):
+    def setUp(self):
+        super(TestNGRAPHWith1x1, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPHWith1x1, self).init_kernel_type()
 
 
 class TestNGRAPHWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
+    def setUp(self):
+        super(TestNGRAPHWithInput1x1Filter1x1, self).setUp()
+        self._cpu_only = True
+
     def init_kernel_type(self):
         super(TestNGRAPHWithInput1x1Filter1x1, self).init_kernel_type()
......
@@ -14,73 +14,16 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_elementwise_add_op import *
-
-
-class TestNGRAPHElementwiseAddOp(TestElementwiseAddOp):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_scalar(TestElementwiseAddOp_scalar):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_scalar, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_scalar2(TestElementwiseAddOp_scalar2):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_scalar2, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_Vector(TestElementwiseAddOp_Vector):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_Vector, self).init_input_output()
-
-
-class TesNGRAPHtElementwiseAddOp_broadcast_0(TestElementwiseAddOp_broadcast_0):
-    def init_input_output(self):
-        super(TesNGRAPHtElementwiseAddOp_broadcast_0, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_broadcast_1(TestElementwiseAddOp_broadcast_1):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_broadcast_1, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_broadcast_2(TestElementwiseAddOp_broadcast_2):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_broadcast_2, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_broadcast_3(TestElementwiseAddOp_broadcast_3):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_broadcast_3, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_broadcast_4(TestElementwiseAddOp_broadcast_4):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_broadcast_4, self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_rowwise_add_0(
-        TestElementwiseAddOp_rowwise_add_0):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_rowwise_add_0,
-              self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_rowwise_add_1(
-        TestElementwiseAddOp_rowwise_add_1):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_rowwise_add_1,
-              self).init_input_output()
-
-
-class TestNGRAPHElementwiseAddOp_channelwise_add(
-        TestElementwiseAddOp_channelwise_add):
-    def init_input_output(self):
-        super(TestNGRAPHElementwiseAddOp_channelwise_add,
-              self).init_input_output()
+from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp
+
+
+class TestNGRAPHElementwiseAddOp(TestElementwiseAddOp):
+    def setUp(self):
+        super(TestNGRAPHElementwiseAddOp, self).setUp()
+        self._cpu_only = True
+
+    def init_input_output(self):
+        super(TestNGRAPHElementwiseAddOp, self).init_input_output()
 
 
 if __name__ == '__main__':
......
@@ -14,17 +14,13 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_mean_op import TestMeanOp, TestFP16MeanOp
+from paddle.fluid.tests.unittests.test_mean_op import TestMeanOp
 
 
 class TestNGRAPHMeanOp(TestMeanOp):
     def setUp(self):
         super(TestNGRAPHMeanOp, self).setUp()
-
-
-class TestNGRAPHFP16MeanOp(TestFP16MeanOp):
-    def setUp(self):
-        super(TestNGRAPHFP16MeanOp, self).setUp()
+        self._cpu_only = True
 
 
 if __name__ == "__main__":
......
@@ -15,27 +15,38 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_mul_op import TestMulOp, TestMulOp2, TestFP16MulOp1, TestFP16MulOp2
-
-
-class TestNGRAPHMulOp(TestMulOp):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHMulOp2(TestMulOp2):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHFP16MulOp1(TestFP16MulOp1):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHFP16MulOp2(TestFP16MulOp2):
-    def init_dtype_type(self):
-        pass
+import numpy as np
+from paddle.fluid.tests.unittests.op_test import OpTest
+
+
+class TestNGRAPHMulOp(OpTest):
+    def setUp(self):
+        self.op_type = "mul"
+        self.dtype = np.float32
+        self.init_dtype_type()
+        self.inputs = {
+            'X': np.random.random((2, 4)).astype(self.dtype),
+            'Y': np.random.random((4, 4)).astype(self.dtype)
+        }
+        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
+        self._cpu_only = True
+
+    def init_dtype_type(self):
+        pass
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
 
 
 if __name__ == "__main__":
......
@@ -14,35 +14,59 @@
 from __future__ import print_function
 
-from paddle.fluid.tests.unittests.test_pool2d_op import *
+from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5
 
 
 class TestNGRAPHPool2D_Op(TestPool2D_Op):
+    def setUp(self):
+        super(TestNGRAPHPool2D_Op, self).setUp()
+        self._cpu_only = True
+
     def init_test_case(self):
         super(TestNGRAPHPool2D_Op, self).init_test_case()
 
 
 class TestNGRAPHCase1(TestCase1):
+    def setUp(self):
+        super(TestNGRAPHCase1, self).setUp()
+        self._cpu_only = True
+
     def init_test_case(self):
         super(TestNGRAPHCase1, self).init_test_case()
 
 
 class TestNGRAPHCase2(TestCase2):
+    def setUp(self):
+        super(TestNGRAPHCase2, self).setUp()
+        self._cpu_only = True
+
     def init_test_case(self):
         super(TestNGRAPHCase2, self).init_test_case()
 
 
 class TestNGRAPHCase3(TestCase3):
+    def setUp(self):
+        super(TestNGRAPHCase3, self).setUp()
+        self._cpu_only = True
+
     def init_pool_type(self):
         super(TestNGRAPHCase3, self).init_pool_type()
 
 
 class TestNGRAPHCase4(TestCase4):
+    def setUp(self):
+        super(TestNGRAPHCase4, self).setUp()
+        self._cpu_only = True
+
     def init_pool_type(self):
         super(TestNGRAPHCase4, self).init_pool_type()
 
 
 class TestNGRAPHCase5(TestCase5):
+    def setUp(self):
+        super(TestNGRAPHCase5, self).setUp()
+        self._cpu_only = True
+
     def init_pool_type(self):
         super(TestNGRAPHCase5, self).init_pool_type()
......
@@ -13,25 +13,23 @@
 # limitations under the License.
 
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_scale_op import TestScaleOp, TestScaleOpSelectedRows, TestScaleFp16Op, TestScaleFp16OpSelectedRows
-
-
-class TestNGRAPHScaleOp(TestScaleOp):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHScaleOpSelectedRows(TestScaleOpSelectedRows):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHScaleFp16Op(TestScaleFp16Op):
-    def init_dtype_type(self):
-        pass
-
-
-class TestNGRAPHScaleFp16OpSelectedRows(TestScaleFp16OpSelectedRows):
-    def init_dtype_type(self):
-        pass
+from paddle.fluid.tests.unittests.test_scale_op import TestScaleOp, TestScaleOpSelectedRows
+
+
+class TestNGRAPHScaleOp(TestScaleOp):
+    def setUp(self):
+        super(TestNGRAPHScaleOp, self).setUp()
+        self._cpu_only = True
+
+    def init_dtype_type(self):
+        pass
+
+
+class TestNGRAPHScaleOpSelectedRows(TestScaleOpSelectedRows):
+    def setUp(self):
+        super(TestNGRAPHScaleOpSelectedRows, self).setUp()
+        self._cpu_only = True
+
+    def init_dtype_type(self):
+        pass
......
@@ -20,21 +20,25 @@ from paddle.fluid.tests.unittests.test_top_k_op import TestTopkOp, TestTopkOp3d,
 class TestNGRAPHTopkOp(TestTopkOp):
     def setUp(self):
         super(TestNGRAPHTopkOp, self).setUp()
+        self._cpu_only = True
 
 
 class TestNGRAPHTopkOp2(TestTopkOp2):
     def setUp(self):
         super(TestNGRAPHTopkOp2, self).setUp()
+        self._cpu_only = True
 
 
 class TestNGRAPHTopkOp3(TestTopkOp3):
     def setUp(self):
         super(TestNGRAPHTopkOp3, self).setUp()
+        self._cpu_only = True
 
 
 class TestNGRAPHTopkOp4(TestTopkOp4):
     def setUp(self):
         super(TestNGRAPHTopkOp4, self).setUp()
+        self._cpu_only = True
 
 
 if __name__ == "__main__":
......