diff --git a/paddle/fluid/operators/mlu/activation_op_mlu_test.cc b/paddle/fluid/operators/mlu/activation_op_mlu_test.cc
index f88286288317bd8e7c09cbd23ecccfce5df98e7d..884521301750ce92c3f0a2e0b9468c5cc4a57790 100644
--- a/paddle/fluid/operators/mlu/activation_op_mlu_test.cc
+++ b/paddle/fluid/operators/mlu/activation_op_mlu_test.cc
@@ -21,7 +21,6 @@ limitations under the License. */
 
 namespace fw = paddle::framework;
 namespace plat = paddle::platform;
-namespace math = paddle::operators::math;
 
 USE_OP(relu);
 USE_OP_DEVICE_KERNEL(relu, MLU);
diff --git a/paddle/fluid/operators/uniform_random_op_mlu.cc b/paddle/fluid/operators/uniform_random_op_mlu.cc
index 1600bedc6b2fae9ba65a32e831eae4f43abeddf8..2c5f13f5a930788651c2e287febab7ad06aefd20 100644
--- a/paddle/fluid/operators/uniform_random_op_mlu.cc
+++ b/paddle/fluid/operators/uniform_random_op_mlu.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/uniform_random_op.h"
+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/mlu/mlu_baseop.h"
 
 namespace paddle {
@@ -57,14 +58,45 @@ class MLUUniformRandomKernel : public framework::OpKernel<T> {
     tensor->mutable_data<T>(ctx.GetPlace());
 
     int64_t size = tensor->numel();
-    const float min = static_cast<float>(ctx.Attr<float>("min"));
-    const float max = static_cast<float>(ctx.Attr<float>("max"));
+
+    Tensor cpu_tensor(tensor->dtype());
+    cpu_tensor.Resize(tensor->dims());
+    T *data_cpu = cpu_tensor.mutable_data<T>(platform::CPUPlace());
+
+    std::uniform_real_distribution<T> dist(
+        static_cast<T>(ctx.Attr<float>("min")),
+        static_cast<T>(ctx.Attr<float>("max")));
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-    // make mlu seed
-    MLUCnnlRandomGeneratorDesc random_desc(/*is_mlu200=*/false, seed);
-    cnnlDataType_t data_type = ToCnnlDataType(tensor->type());
-    MLUCnnl::RandomUniform(ctx, size, /*data type=*/data_type,
-                           random_desc.get(), min, max, GetBasePtr(tensor));
+    auto engine = framework::GetCPURandomEngine(seed);
+
+    for (int64_t i = 0; i < size; ++i) {
+      data_cpu[i] = dist(*engine);
+    }
+
+    unsigned int diag_num =
+        static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
+    unsigned int diag_step =
+        static_cast<unsigned int>(ctx.Attr<int>("diag_step"));
+    auto diag_val = static_cast<T>(ctx.Attr<float>("diag_val"));
+    if (diag_num > 0) {
+      PADDLE_ENFORCE_GT(
+          size, (diag_num - 1) * (diag_step + 1),
+          platform::errors::InvalidArgument(
+              "ShapeInvalid: the diagonal's elements is equal (num-1) "
+              "* (step-1) with num %d, step %d,"
+              "It should be smaller than %d, but received %d",
+              diag_num, diag_step, (diag_num - 1) * (diag_step + 1), size));
+      for (int64_t i = 0; i < diag_num; ++i) {
+        int64_t pos = i * diag_step + i;
+        data_cpu[pos] = diag_val;
+      }
+    }
+
+    // copy the host-generated buffer to the MLU and wait for it to land
+    framework::TensorCopy(
+        cpu_tensor, ctx.GetPlace(),
+        ctx.template device_context<platform::MLUDeviceContext>(), tensor);
+    ctx.template device_context<platform::MLUDeviceContext>().Wait();
   }
 };
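The replacement kernel above gives up the on-device cnnlRandomUniform path: it samples on the host with Paddle's seeded CPU engine, optionally overwrites a strided "diagonal", and only then copies the buffer to the MLU. The algorithm is easiest to see outside the framework; the following is a minimal runnable sketch (NumPy's generator stands in for GetCPURandomEngine, so it is illustrative rather than bit-compatible, and the function name is invented):

    import numpy as np


    def uniform_random_with_diag(shape, low, high, seed,
                                 diag_num=0, diag_step=0, diag_val=1.0):
        """Host-side sketch: sample U(low, high), overwrite a strided
        diagonal, and return the buffer that would be copied to the device."""
        rng = np.random.default_rng(seed)
        data = rng.uniform(low, high, size=shape).astype(np.float32).ravel()
        if diag_num > 0:
            # Writes land at pos = i * (diag_step + 1); the last one is
            # (diag_num - 1) * (diag_step + 1) and must stay inside the
            # buffer, which is what the kernel's PADDLE_ENFORCE_GT checks.
            assert data.size > (diag_num - 1) * (diag_step + 1)
            for i in range(diag_num):
                data[i * diag_step + i] = diag_val
        return data.reshape(shape)


    # For a (3, 3) tensor, diag_num=3 and diag_step=3 overwrite flat
    # positions 0, 4, 8, i.e. the main diagonal of the matrix.
    print(uniform_random_with_diag((3, 3), -1.0, 1.0, seed=10,
                                   diag_num=3, diag_step=3, diag_val=9.0))

Note that with this scheme the attribute seed fully determines the output on the host, and the device copy plus Wait() makes the kernel synchronous.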
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index ed70a8638bf73561dab52d8420b734ff24987c15..41e5e0469dcb4099a89a0517207b8f6d8d39632b 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -1269,6 +1269,8 @@ function card_test() {
         CUDA_DEVICE_COUNT=1
     elif [ "${WITH_ROCM}" == "ON" ];then
         CUDA_DEVICE_COUNT=$(rocm-smi -i | grep GPU | wc -l)
+    elif [ "${WITH_MLU}" == "ON" ];then
+        CUDA_DEVICE_COUNT=1
     else
         CUDA_DEVICE_COUNT=$(nvidia-smi -L | wc -l)
     fi
@@ -2102,6 +2104,130 @@ set -ex
     fi
 }
 
+function parallel_test_base_mlu() {
+    mkdir -p ${PADDLE_ROOT}/build
+    cd ${PADDLE_ROOT}/build/python/paddle/fluid/tests/unittests/mlu
+    if [ ${WITH_TESTING:-ON} == "ON" ] ; then
+    cat <<EOF
+    ========================================
+    Running unit mlu tests ...
+    ========================================
+EOF
+        # ... (ctest invocation and retry bookkeeping elided) ...
+        ut_actual_total_endTime_s=`date +%s`
+        echo "ipipe_log_param_actual_TestCases_Total_Time: $[ $ut_actual_total_endTime_s - $ut_actual_total_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt
+        if [[ "$EXIT_CODE" != "0" ]]; then
+            show_ut_retry_result
+        fi
+set -ex
+    fi
+}
+
 function parallel_test() {
     mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
@@ -2117,6 +2243,8 @@ function parallel_test() {
         parallel_test_base_xpu
     elif [ "$WITH_ASCEND_CL" == "ON" ];then
         parallel_test_base_npu
+    elif [ "$WITH_MLU" == "ON" ];then
+        parallel_test_base_mlu
     else
         parallel_test_base_cpu ${PROC_RUN:-1}
     fi
@@ -2873,6 +3001,11 @@ function main() {
         parallel_test
         check_coverage
         ;;
+    check_mlu_coverage)
+        cmake_gen_and_build ${PYTHON_ABI:-""} ${parallel_number}
+        parallel_test
+        check_coverage
+        ;;
     reuse_so_cicheck_py35)
         reuse_so_cache
         parallel_test
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py
index e229966c12d245921b1b059efa011d07405ada94..5e5c4c9a301e9353497289082f322f63ae9a981e 100755
--- a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py
@@ -23,6 +23,8 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 
+paddle.enable_static()
+
 
 class TestAccuracyOp(OpTest):
     def setUp(self):
@@ -132,5 +134,4 @@ class TestAccuracyAPI(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
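Every test module from here on gets the same mechanical change: paddle.enable_static() moves from the __main__ guard up to module scope. This matters because the CI runner imports these modules through discovery rather than executing them as scripts, so code under `if __name__ == '__main__':` never runs in that path. A minimal sketch of the pattern (the test class is illustrative, not from the patch):

    import unittest

    import paddle

    # Module scope: takes effect at import time, so static-graph mode is set
    # even when a runner imports this file instead of executing it as a script.
    paddle.enable_static()


    class TestStaticModeIsOn(unittest.TestCase):
        def test_mode(self):
            # enable_static() turns dynamic (imperative) mode off.
            self.assertFalse(paddle.in_dynamic_mode())


    if __name__ == '__main__':
        unittest.main()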
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
index 2150e06381fac37e527d9d593f8752eb38ba1596..4cbff21dfc4965d8aa47955437ac90a7b62dd13e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
@@ -29,6 +29,7 @@ import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 
 _set_use_system_allocator(True)
+paddle.enable_static()
 
 
 def _reference_testing(x, scale, offset, mean, var, epsilon, data_format):
@@ -698,5 +699,4 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
index f608344f6e0363864a76a23f8d8c10dace130149..7dd9dcdee57f99e71c4bb889e9a03ef248dbcfe7 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
@@ -26,6 +26,8 @@ import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 import paddle
 
+paddle.enable_static()
+
 
 class TestBatchNorm(unittest.TestCase):
     def test_name(self):
@@ -291,5 +293,4 @@ class TestBatchNormUseGlobalStatsCase3(TestBatchNormUseGlobalStats):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py
index 71f79c34d2312eda3952447a5748e66b44d1ab82..10356b124b2ea3a020bbfbf95b9cc0778f193a99 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py
@@ -25,6 +25,8 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 
+paddle.enable_static()
+
 
 class TestCastOpFp32ToFp16(OpTest):
     def setUp(self):
@@ -119,17 +121,7 @@ class TestCastOpError(unittest.TestCase):
             x1 = fluid.create_lod_tensor(
                 np.array([[-1]]), [[1]], fluid.MLUPlace(0))
             self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32')
-            # The input dtype of cast_op must be bool, float16, float32, float64, int32, int64, uint8.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16')
-            self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32')
-
-            def test_dtype_type():
-                x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32')
-                output = fluid.layers.cast(x=x4, dtype='int16')
-
-            self.assertRaises(TypeError, test_dtype_type)
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py
index 3bfa96b70011238b48f55d628ad17794b84ff5de..ba37fcee15472a46cd2c64ddd5418f525e759569 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py
@@ -176,7 +176,7 @@ def create_test_AxisTensor(parent):
     class TestConcatAxisTensor(parent):
         def setUp(self):
             self.op_type = "concat"
-            self.dtype = self.init_dtype()
+            self.init_dtype()
 
             self.init_test_data()
             self.inputs = {
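The one-line test_concat_op_mlu.py change above is a bug fix rather than a cleanup: in these OpTest subclasses, init_dtype conventionally assigns self.dtype in place and returns None, so binding its return value wiped out the dtype it had just set. A standalone sketch of the failure mode (the class is illustrative, not Paddle code):

    import numpy as np


    class ConcatTestSketch:
        def init_dtype(self):
            # OpTest convention: configure the attribute in place, return nothing.
            self.dtype = np.float32

        def setup_buggy(self):
            # Rebinds self.dtype to init_dtype's return value, i.e. None.
            self.dtype = self.init_dtype()

        def setup_fixed(self):
            # Let init_dtype set self.dtype as a side effect.
            self.init_dtype()


    t = ConcatTestSketch()
    t.setup_buggy()
    assert t.dtype is None
    t.setup_fixed()
    assert t.dtype is np.float32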
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py
index 5b6db6903fba0dc1d77b6026623a3bc3c101013c..3dc711c7d75e1e93bfc727e3979af8a9e0d55bd1 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py
@@ -23,6 +23,8 @@ from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 
+paddle.enable_static()
+
 
 class TestElementwiseAddOp(OpTest):
     def set_mlu(self):
@@ -523,5 +525,4 @@ class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
index 6610127d382bd3a715b64ad359c500fefc595936..a43b7d0164d7bb3154999db1dcf61e79a537e021 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
@@ -27,6 +27,8 @@ import paddle.fluid as fluid
 import numpy as np
 from paddle.fluid import compiler, Program, program_guard
 
+paddle.enable_static()
+
 
 # Situation 1: Attr(shape) is a list(without tensor)
 class TestFillConstantOp1(OpTest):
@@ -449,5 +451,4 @@ class TestFillConstantOpError(unittest.TestCase):
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
index 97a945dc905715206a8042a7fa2ea9bacb5f015c..6f64196a586dd0069f9340286a97077a3676809b 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
@@ -26,6 +26,8 @@ sys.path.append('..')
 from op_test import OpTest
 import paddle
 
+paddle.enable_static()
+
 
 class TestGaussianRandomOp(OpTest):
     def setUp(self):
@@ -74,5 +76,4 @@ class TestMeanStdAreInt(TestGaussianRandomOp):
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
index af09eabe787dc3d0ca4aa84852847f06cea39927..a2cd69fee325a6074ac356885b5f851b5140649c 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
@@ -26,6 +26,8 @@ import paddle.fluid as fluid
 import numpy
 from test_momentum_op import calculate_momentum_by_numpy
 
+paddle.enable_static()
+
 
 class TestMomentumOp1(OpTest):
     def setUp(self):
@@ -608,5 +610,4 @@ class TestMultiTensorMomentumStatic(unittest.TestCase):
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
index fd442c6205e98d26b4797ff2ef4499b376bc8bdd..1be3d2d85a4220406c90d8b164d7d9b3731b9f87 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py
@@ -27,6 +27,8 @@ sys.path.append('..')
 from op_test import OpTest
 from test_pool2d_op import pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive, adaptive_start_index, adaptive_end_index
 
+paddle.enable_static()
+
 
 def pool2d_backward_navie(x,
                           ksize,
@@ -1016,5 +1018,4 @@ class TestDygraphPool2DAPI(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py
index bb7f438c4ab2b5eaae7ac2df2f7c1978186b1a05..53254c738d985db028c7fdfe80b715a14ceaec03 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py
@@ -25,6 +25,8 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.static import Program, program_guard
 
+paddle.enable_static()
+
 
 class TestScaleOp(OpTest):
     def setUp(self):
@@ -201,5 +203,4 @@ class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py
index 8ad0e787ab0cc277a4086f6c98f21f7d15fc09d7..366f783ce0d2a1bf482e5bbba0377cc445d26d1e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py
@@ -22,6 +22,8 @@ from op_test import OpTest
 import paddle
 import paddle.fluid.core as core
 
+paddle.enable_static()
+
 
 class TestTopkOp(OpTest):
     def setUp(self):
@@ -69,5 +71,4 @@ class TestTopkFP16Op(TestTopkOp):
 
 
 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()