Unverified · Commit 77a36f89 · Authored by Shang Zhizhou · Committed by GitHub

[bug fix]: fix some unittest errors (#27540)

* [bug fix]: fix unittest test_activation_op error

* split long-running unittests into smaller ones (the pattern is sketched below)

* rename some unittests
Parent 6e16a099
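The change applies one pattern throughout: the TEST() bodies move out of a single long tester into one small .cc per case, while the shared SetConfig/SetInput code stays in a *_tester_helper.h, so each case becomes its own CTest target with its own TIMEOUT. A minimal sketch of that layout, using hypothetical names (gtest assumed as the framework; not the real Paddle helpers):

// Sketch only, hypothetical names.
// my_suite_tester_helper.h -- shared setup; no TEST() macros live here.
#pragma once
#include <vector>

inline std::vector<float> MakeSharedInput() {  // stands in for SetInput()
  return {0.1f, 0.2f, 0.3f};
}

// my_suite_profile_tester.cc -- exactly one TEST() per translation unit,
// so CMake can register each file as its own test binary and give it an
// individual TIMEOUT property.
#include <gtest/gtest.h>

TEST(MySuite, profile) {
  auto input = MakeSharedInput();
  EXPECT_EQ(input.size(), 3u);  // trivial stand-in for the real check
}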
paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -132,9 +132,17 @@ if(NOT APPLE AND WITH_MKLML)
  # seq_pool1
  set(SEQ_POOL1_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/seq_pool")
  download_model_and_data(${SEQ_POOL1_INSTALL_DIR} "seq_pool1_model_.tar.gz" "seq_pool1_data.txt.tar.gz")
-  inference_analysis_api_test(test_analyzer_seq_pool1 ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_tester.cc)
+  inference_analysis_api_test(test_analyzer_seq_pool1_compare_determine ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_compare_determine_tester.cc)
+  inference_analysis_api_test(test_analyzer_seq_pool1 ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_compare_tester.cc)
+  inference_analysis_api_test(test_analyzer_seq_pool1_fuse_compare_zero_copy ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_fuse_compare_zero_copy_tester.cc)
+  inference_analysis_api_test(test_analyzer_seq_pool1_fuse_statis ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_fuse_statis_tester.cc)
+  inference_analysis_api_test(test_analyzer_seq_pool1_profile ${SEQ_POOL1_INSTALL_DIR} analyzer_seq_pool1_profile_tester.cc)
  if(NOT WIN32)
-    set_tests_properties(test_analyzer_seq_pool1 PROPERTIES TIMEOUT 150)
+    set_tests_properties(test_analyzer_seq_pool1_compare_determine PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_analyzer_seq_pool1 PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_analyzer_seq_pool1_fuse_compare_zero_copy PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_analyzer_seq_pool1_fuse_statis PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_analyzer_seq_pool1_profile PROPERTIES TIMEOUT 120)
  endif()
else()
# TODO: fix this test on MACOS and OPENBLAS, the reason is that
@@ -215,7 +223,15 @@ inference_analysis_api_test(test_analyzer_seq_conv1 ${SEQ_CONV1_INSTALL_DIR} ana
# transformer, the dataset only works on batch_size=8 now
set(TRANSFORMER_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/transformer")
download_model_and_data(${TRANSFORMER_INSTALL_DIR} "temp/transformer_model.tar.gz" "temp/transformer_data.txt.tar.gz")
-inference_analysis_test(test_analyzer_transformer SRCS analyzer_transformer_compare_tester.cc
+inference_analysis_test(test_analyzer_transformer SRCS analyzer_transformer_compare_tester.cc
        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
        ARGS --infer_model=${TRANSFORMER_INSTALL_DIR}/model --infer_data=${TRANSFORMER_INSTALL_DIR}/data.txt --batch_size=8
        --cpu_num_threads=${CPU_NUM_THREADS_ON_CI})
+inference_analysis_test(test_analyzer_transformer_fuse SRCS analyzer_transformer_fuse_tester.cc
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+        ARGS --infer_model=${TRANSFORMER_INSTALL_DIR}/model --infer_data=${TRANSFORMER_INSTALL_DIR}/data.txt --batch_size=8
+        --cpu_num_threads=${CPU_NUM_THREADS_ON_CI})
+inference_analysis_test(test_analyzer_transformer_profile SRCS analyzer_transformer_profile_tester.cc
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+        ARGS --infer_model=${TRANSFORMER_INSTALL_DIR}/model --infer_data=${TRANSFORMER_INSTALL_DIR}/data.txt --batch_size=8
+        --cpu_num_threads=${CPU_NUM_THREADS_ON_CI})
paddle/fluid/inference/tests/api/analyzer_seq_pool1_compare_determine_tester.cc (new file)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace seq_pool1_tester {
// Compare Deterministic result
TEST(Analyzer_seq_pool1_compare_determine, compare_determine) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                       input_slots_all);
}
} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
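CompareDeterministic above is presumably the tester_helper.h utility that runs the same inputs through the predictor more than once and requires identical outputs. A framework-free toy version of that check (RunModel is invented):

#include <cassert>
#include <cmath>
#include <vector>

// Invented stand-in for a predictor's Run(); any pure function works here.
static std::vector<float> RunModel(const std::vector<float> &in) {
  std::vector<float> out;
  out.reserve(in.size());
  for (float v : in) out.push_back(std::tanh(v));
  return out;
}

int main() {
  const std::vector<float> input = {0.1f, 0.2f, 0.3f};
  auto first = RunModel(input);
  auto second = RunModel(input);  // second run on identical input
  assert(first == second);        // deterministic: outputs match exactly
  return 0;
}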
paddle/fluid/inference/tests/api/analyzer_seq_pool1_compare_tester.cc (new file)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace seq_pool1_tester {
TEST(Analyzer_seq_pool1_compare, compare) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
}
} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_seq_pool1_fuse_compare_zero_copy_tester.cc (new file)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace seq_pool1_tester {
// Compare result of AnalysisConfig and AnalysisConfig + ZeroCopy
TEST(Analyzer_seq_pool1_compare_zero_copy, compare_zero_copy) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  AnalysisConfig cfg1;
  SetConfig(&cfg1);
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  std::vector<std::string> outputs_name;
  outputs_name.emplace_back(out_var_name);
  CompareAnalysisAndZeroCopy(reinterpret_cast<PaddlePredictor::Config *>(&cfg),
                             reinterpret_cast<PaddlePredictor::Config *>(&cfg1),
                             input_slots_all, outputs_name);
}
} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_seq_pool1_fuse_statis_tester.cc (new file)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace seq_pool1_tester {
// Check the fuse status
TEST(Analyzer_seq_pool1_fuse_statis, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(predictor.get(), &num_ops);
  ASSERT_TRUE(fuse_statis.count("fc_fuse"));
  ASSERT_TRUE(fuse_statis.count("seqpool_concat_fuse"));
  ASSERT_TRUE(fuse_statis.count("squared_mat_sub_fuse"));
  ASSERT_TRUE(fuse_statis.count("repeated_fc_relu_fuse"));
  ASSERT_EQ(fuse_statis.at("fc_fuse"), 10);
  EXPECT_EQ(fuse_statis.at("seqpool_concat_fuse"), 2);
  EXPECT_EQ(fuse_statis.at("squared_mat_sub_fuse"), 2);
  EXPECT_EQ(fuse_statis.at("repeated_fc_relu_fuse"), 2);
  LOG(INFO) << "num_ops: " << num_ops;
  EXPECT_EQ(num_ops, 171);
}
} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
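GetFuseStatis appears to return a map from fuse-pass name to how many subgraphs that pass fused, which is why the test above can assert both presence (count) and exact totals (at). A toy illustration of that contract, with invented numbers:

#include <cassert>
#include <map>
#include <string>

int main() {
  // Invented statistics standing in for GetFuseStatis() output.
  std::map<std::string, int> fuse_statis = {{"fc_fuse", 10},
                                            {"seqpool_concat_fuse", 2}};
  assert(fuse_statis.count("fc_fuse") == 1);  // the pass fired at all
  assert(fuse_statis.at("fc_fuse") == 10);    // and exactly ten times
  return 0;
}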
paddle/fluid/inference/tests/api/analyzer_seq_pool1_profile_tester.cc (new file)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace seq_pool1_tester {
void profile(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg, use_mkldnn);
  std::vector<std::vector<PaddleTensor>> outputs;
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                 input_slots_all, &outputs, FLAGS_num_threads);
}

TEST(Analyzer_seq_pool1_profile, profile) { profile(); }
} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
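TestPrediction, as used here, presumably times repeated predictor runs (optionally across FLAGS_num_threads threads) and logs the latency. The core of such a measurement reduces to a loop like this sketch (FakeRun and the repeat count are invented):

#include <chrono>
#include <cstdio>
#include <vector>

// Invented stand-in for predictor->Run() on one batch.
static double FakeRun(const std::vector<double> &in) {
  double sum = 0.0;
  for (double v : in) sum += v * v;
  return sum;
}

int main() {
  const std::vector<double> input(1024, 0.5);
  const int repeat = 100;      // mirrors a --repeat style flag
  volatile double sink = 0.0;  // keeps the loop from being optimized away
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; ++i) sink = sink + FakeRun(input);
  auto stop = std::chrono::steady_clock::now();
  double ms = std::chrono::duration<double, std::milli>(stop - start).count();
  std::printf("avg latency: %.3f ms over %d runs\n", ms / repeat, repeat);
  return 0;
}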
paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester_helper.h
@@ -11,15 +11,20 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
+#pragma once
#include <algorithm>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
+namespace seq_pool1_tester {
// diff: similarity_norm.tmp_0, for speed: fc_4.tmp_1
static const char out_var_name[] = "reduce_sum_0.tmp_0";
@@ -164,77 +169,7 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false) {
  cfg->pass_builder()->InsertPass(2, "seqpool_concat_fuse_pass");
}
-void profile(bool use_mkldnn = false) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg, use_mkldnn);
-  std::vector<std::vector<PaddleTensor>> outputs;
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-                 input_slots_all, &outputs, FLAGS_num_threads);
-}
-
-TEST(Analyzer_seq_pool1, profile) { profile(); }
-
-// Compare result of NativeConfig and AnalysisConfig
-TEST(Analyzer_seq_pool1, compare) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  CompareNativeAndAnalysis(
-      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
-}
-
-// Compare Deterministic result
-TEST(Analyzer_seq_pool1, compare_determine) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-                       input_slots_all);
-}
-
-// Check the fuse status
-TEST(Analyzer_seq_pool1, fuse_statis) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  int num_ops;
-  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
-  auto fuse_statis = GetFuseStatis(predictor.get(), &num_ops);
-  ASSERT_TRUE(fuse_statis.count("fc_fuse"));
-  ASSERT_TRUE(fuse_statis.count("seqpool_concat_fuse"));
-  ASSERT_TRUE(fuse_statis.count("squared_mat_sub_fuse"));
-  ASSERT_TRUE(fuse_statis.count("repeated_fc_relu_fuse"));
-  ASSERT_EQ(fuse_statis.at("fc_fuse"), 10);
-  EXPECT_EQ(fuse_statis.at("seqpool_concat_fuse"), 2);
-  EXPECT_EQ(fuse_statis.at("squared_mat_sub_fuse"), 2);
-  EXPECT_EQ(fuse_statis.at("repeated_fc_relu_fuse"), 2);
-  LOG(INFO) << "num_ops: " << num_ops;
-  EXPECT_EQ(num_ops, 171);
-}
-
-// Compare result of AnalysisConfig and AnalysisConfig + ZeroCopy
-TEST(Analyzer_seq_pool1, compare_zero_copy) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  AnalysisConfig cfg1;
-  SetConfig(&cfg1);
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  std::vector<std::string> outputs_name;
-  outputs_name.emplace_back(out_var_name);
-  CompareAnalysisAndZeroCopy(reinterpret_cast<PaddlePredictor::Config *>(&cfg),
-                             reinterpret_cast<PaddlePredictor::Config *>(&cfg1),
-                             input_slots_all, outputs_name);
-}
+} // namespace seq_pool1_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_transformer_compare_tester.cc (new file)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tests/api/analyzer_transformer_tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace transformer_tester {
void compare(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  if (use_mkldnn) {
    cfg.EnableMKLDNN();
    cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
  }

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
}

TEST(Analyzer_Transformer, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif
} // namespace transformer_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_transformer_fuse_tester.cc (new file)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tests/api/analyzer_transformer_tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace transformer_tester {
// Check the fuse status
TEST(Analyzer_Transformer, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
}
} // namespace transformer_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_transformer_profile_tester.cc (new file)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tests/api/analyzer_transformer_tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
namespace transformer_tester {
void profile(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<std::vector<PaddleTensor>> outputs;
  if (use_mkldnn) {
    cfg.EnableMKLDNN();
    cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
  }

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                 input_slots_all, &outputs, FLAGS_num_threads);
}

TEST(Analyzer_Transformer, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_Transformer, profile_mkldnn) { profile(true); }
#endif
} // namespace transformer_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
paddle/fluid/inference/tests/api/analyzer_transformer_tester_helper.h
@@ -11,11 +11,16 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+#pragma once
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
namespace analysis {
+namespace transformer_tester {
struct DataRecord {
  std::vector<std::vector<int64_t>> src_word, src_pos, trg_word, init_idx;
@@ -182,57 +187,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
  }
}
-// Easy for profiling independently.
-void profile(bool use_mkldnn = false) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  std::vector<std::vector<PaddleTensor>> outputs;
-  if (use_mkldnn) {
-    cfg.EnableMKLDNN();
-    cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
-  }
-
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-                 input_slots_all, &outputs, FLAGS_num_threads);
-}
-
-TEST(Analyzer_Transformer, profile) { profile(); }
-#ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_Transformer, profile_mkldnn) { profile(true); }
-#endif
-
-// Check the fuse status
-TEST(Analyzer_Transformer, fuse_statis) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  int num_ops;
-  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
-  auto fuse_statis = GetFuseStatis(
-      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
-}
-
-// Compare result of NativeConfig and AnalysisConfig
-void compare(bool use_mkldnn = false) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg);
-  if (use_mkldnn) {
-    cfg.EnableMKLDNN();
-    cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
-  }
-
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  CompareNativeAndAnalysis(
-      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
-}
-
-TEST(Analyzer_Transformer, compare) { compare(); }
-#ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); }
-#endif
+} // namespace transformer_tester
} // namespace analysis
} // namespace inference
} // namespace paddle
python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -28,6 +28,7 @@ from paddle.fluid import compiler, Program, program_guard
class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
@@ -44,6 +45,7 @@ class TestSqrtOpError(unittest.TestCase):
class TestActivation(OpTest):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()
@@ -71,6 +73,7 @@ class TestActivation(OpTest):
class TestParameter(object):
    def test_out_name(self):
+        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
@@ -92,6 +95,7 @@ class TestParameter(object):
class TestSigmoid(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "sigmoid"
        self.init_dtype()
@@ -112,6 +116,7 @@ class TestSigmoid(TestActivation):
class TestLogSigmoid(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "logsigmoid"
        self.init_dtype()
@@ -180,6 +185,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
class TestTanh(TestActivation, TestParameter):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
@@ -255,6 +261,7 @@ class TestTanhAPI(unittest.TestCase):
class TestAtan(TestActivation, TestParameter):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "atan"
        self.init_dtype()
@@ -291,6 +298,7 @@ class TestAtan(TestActivation, TestParameter):
class TestSinh(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "sinh"
        self.init_dtype()
@@ -349,6 +357,7 @@ class TestSinh(TestActivation):
class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
@@ -362,6 +371,7 @@ class TestSinhOpError(unittest.TestCase):
class TestCosh(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "cosh"
        self.init_dtype()
@@ -420,6 +430,7 @@ class TestCosh(TestActivation):
class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
@@ -438,6 +449,7 @@ def ref_tanhshrink(x):
class TestTanhshrink(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "tanh_shrink"
        self.init_dtype()
@@ -512,6 +524,7 @@ def ref_hardshrink(x, threshold):
class TestHardShrink(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "hard_shrink"
        self.init_dtype()
@@ -541,6 +554,7 @@ class TestHardShrink_threshold_negative(TestHardShrink):
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
+        paddle.enable_static()
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()
@@ -662,6 +676,7 @@ def ref_softshrink(x, threshold=0.5):
class TestSoftshrink(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "softshrink"
        self.init_dtype()
@@ -736,6 +751,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "sqrt"
        self.init_dtype()
@@ -753,6 +769,7 @@ class TestSqrt(TestActivation, TestParameter):
class TestRsqrt(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "rsqrt"
        self.init_dtype()
@@ -770,6 +787,7 @@ class TestRsqrt(TestActivation):
class TestAbs(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "abs"
        self.init_dtype()
@@ -792,6 +810,7 @@ class TestAbs(TestActivation):
class TestCeil(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "ceil"
        self.init_dtype()
@@ -808,6 +827,7 @@ class TestCeil(TestActivation):
class TestFloor(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "floor"
        self.init_dtype()
@@ -826,6 +846,7 @@ class TestFloor(TestActivation):
class TestCos(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "cos"
        self.init_dtype()
@@ -843,6 +864,7 @@ class TestCos(TestActivation):
class TestAcos(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "acos"
        self.init_dtype()
@@ -860,6 +882,7 @@ class TestAcos(TestActivation):
class TestSin(TestActivation, TestParameter):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "sin"
        self.init_dtype()
@@ -877,6 +900,7 @@ class TestSin(TestActivation, TestParameter):
class TestAsin(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "asin"
        self.init_dtype()
@@ -894,6 +918,7 @@ class TestAsin(TestActivation):
class TestRound(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "round"
        self.init_dtype()
@@ -909,6 +934,7 @@ class TestRound(TestActivation):
class TestRelu(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "relu"
        self.init_dtype()
@@ -979,6 +1005,7 @@ class TestLeakyRelu(TestActivation):
        return 0.02

    def setUp(self):
+        paddle.enable_static()
        self.op_type = "leaky_relu"
        self.init_dtype()
        alpha = self.get_alpha()
@@ -1084,6 +1111,7 @@ def gelu(x, approximate):
class TestGeluApproximate(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
@@ -1102,6 +1130,7 @@ class TestGeluApproximate(TestActivation):
class TestGelu(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
@@ -1169,6 +1198,7 @@ class TestGELUAPI(unittest.TestCase):
class TestBRelu(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "brelu"
        self.init_dtype()
@@ -1194,6 +1224,7 @@ class TestBRelu(TestActivation):
class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
@@ -1215,6 +1246,7 @@ def ref_relu6(x, threshold=6.0):
class TestRelu6(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "relu6"
        self.init_dtype()
@@ -1286,6 +1318,7 @@ class TestRelu6API(unittest.TestCase):
class TestHardSwish(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = 'hard_swish'
        self.init_dtype()
@@ -1310,6 +1343,7 @@ class TestHardSwish(TestActivation):
class TestHardSwishOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_swish, 1)
@@ -1323,6 +1357,7 @@ class TestHardSwishOpError(unittest.TestCase):
class TestSoftRelu(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "soft_relu"
        self.init_dtype()
@@ -1348,6 +1383,7 @@ class TestSoftRelu(TestActivation):
class TestSoftReluOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
@@ -1366,6 +1402,7 @@ def elu(x, alpha):
class TestELU(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "elu"
        self.init_dtype()
@@ -1435,6 +1472,7 @@ class TestELUAPI(unittest.TestCase):
class TestReciprocal(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "reciprocal"
        self.init_dtype()
@@ -1452,6 +1490,7 @@ class TestReciprocal(TestActivation):
class TestLog(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "log"
        self.init_dtype()
@@ -1478,6 +1517,7 @@ class TestLog(TestActivation):
class TestLog1p(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "log1p"
        self.init_dtype()
@@ -1522,6 +1562,7 @@ class TestLog1p(TestActivation):
class TestSquare(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "square"
        self.init_dtype()
@@ -1539,6 +1580,7 @@ class TestSquare(TestActivation):
class TestPow(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "pow"
        self.init_dtype()
@@ -1557,6 +1599,7 @@ class TestPow(TestActivation):
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "pow"
        self.init_dtype()
@@ -1633,6 +1676,7 @@ class TestPow_factor_tensor(TestActivation):
class TestSTanh(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "stanh"
        self.init_dtype()
@@ -1653,6 +1697,7 @@ class TestSTanh(TestActivation):
class TestSTanhOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.stanh, 1)
@@ -1673,6 +1718,7 @@ def ref_softplus(x, beta=1, threshold=20):
class TestSoftplus(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "softplus"
        self.init_dtype()
@@ -1751,6 +1797,7 @@ def ref_softsign(x):
class TestSoftsign(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "softsign"
        self.init_dtype()
@@ -1818,6 +1865,7 @@ class TestSoftsignAPI(unittest.TestCase):
class TestThresholdedRelu(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "thresholded_relu"
        self.init_dtype()
@@ -1841,6 +1889,7 @@ class TestThresholdedRelu(TestActivation):
class TestThresholdedReluOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.thresholded_relu, 1)
@@ -1854,6 +1903,7 @@ class TestThresholdedReluOpError(unittest.TestCase):
class TestHardSigmoid(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "hard_sigmoid"
        self.init_dtype()
@@ -1883,6 +1933,7 @@ class TestHardSigmoid(TestActivation):
class TestHardSigmoidOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_sigmoid, 1)
@@ -1896,6 +1947,7 @@ class TestHardSigmoidOpError(unittest.TestCase):
class TestSwish(TestActivation):
    def setUp(self):
+        paddle.enable_static()
        self.op_type = "swish"
        self.init_dtype()
@@ -1915,6 +1967,7 @@ class TestSwish(TestActivation):
class TestSwishOpError(unittest.TestCase):
    def test_errors(self):
+        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.swish, 1)