Unverified · Commit 7500ff61 · authored by risemeup1, committed by GitHub

Fix gcc12_error (#52085)

* fix error,test=document_fix

* test

* fix gcc12_error

* fix gcc12_error

* fix gcc12_error

* fix_gcc12_py3_error

* fix_range-loop-construct_error

* fix_gcc12_error
Parent b7db6af2
@@ -28,7 +28,7 @@ set(GTEST_INCLUDE_DIR
     CACHE PATH "gtest include directory." FORCE)
 set(GTEST_REPOSITORY ${GIT_URL}/google/googletest.git)
 set(GTEST_TAG release-1.8.1)
+set(GTEST_SOURCE_DIR ${THIRD_PARTY_PATH}/gtest/src/extern_gtest)
 include_directories(${GTEST_INCLUDE_DIR})
 if(WIN32)
@@ -63,7 +63,12 @@ if(WITH_MKLML)
   # wait for mklml downloading completed
   set(GTEST_DEPENDS ${MKLML_PROJECT})
 endif()
+if(${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 12.0)
+  file(TO_NATIVE_PATH
+       ${PADDLE_SOURCE_DIR}/patches/gtest/gtest-death-test.cc.patch native_src)
+  set(GTEST_PATCH_COMMAND patch -d ${GTEST_SOURCE_DIR}/googletest/src <
+      ${native_src})
+endif()
 ExternalProject_Add(
   extern_gtest
   ${EXTERNAL_PROJECT_LOG_ARGS} ${SHALLOW_CLONE}
......
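The CMake block added above wires a source patch into the gtest external project: when the host C++ compiler reports a version above 12.0, the googletest 1.8.1 sources checked out under ${GTEST_SOURCE_DIR} are patched in place with the stock patch tool. GTEST_PATCH_COMMAND is presumably consumed as the PATCH_COMMAND of the ExternalProject_Add call that follows (elided here), and the patch file itself, patches/gtest/gtest-death-test.cc.patch, appears near the end of this diff.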
@@ -48,7 +48,7 @@ TEST(Benchmark, EagerScaleCPU) {
   // Prepare Device Contexts
   eager_test::InitEnv(paddle::platform::CPUPlace());
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
     paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                   paddle::platform::CPUPlace(),
@@ -87,7 +87,7 @@ TEST(Benchmark, EagerMatmulCPU) {
   // Prepare Device Contexts
   eager_test::InitEnv(paddle::platform::CPUPlace());
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
     paddle::Tensor X = CreateTensorWithValue(ddimX,
                                              paddle::platform::CPUPlace(),
@@ -137,7 +137,7 @@ TEST(Benchmark, EagerIntermediateMatmulCPU) {
   auto tracer = std::make_shared<paddle::imperative::Tracer>();
   paddle::imperative::SetCurrentTracer(tracer);
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({2, 2});
     paddle::Tensor X = CreateTensorWithValue(ddimX,
                                              paddle::platform::CPUPlace(),
@@ -187,7 +187,7 @@ TEST(Benchmark, EagerIntermediateMLPCPU) {
   auto tracer = std::make_shared<paddle::imperative::Tracer>();
   paddle::imperative::SetCurrentTracer(tracer);
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     paddle::framework::DDim ddimX = phi::make_ddim({MLP_M, MLP_N});
     paddle::Tensor X = CreateTensorWithValue(ddimX,
                                              paddle::platform::CPUPlace(),
......
@@ -51,7 +51,7 @@ TEST(Benchmark, FluidScaleCPU) {
   platform::CPUPlace place;
   eager_test::InitEnv(place);
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     std::shared_ptr<imperative::VarBase> X(new imperative::VarBase(true, "X"));
     X->SetOverridedStopGradient(false);
@@ -97,7 +97,7 @@ TEST(Benchmark, FluidMatmulCPU) {
   platform::CPUPlace place;
   eager_test::InitEnv(place);
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     std::shared_ptr<imperative::VarBase> X(new imperative::VarBase(true, "X"));
     X->SetOverridedStopGradient(false);
     std::shared_ptr<imperative::VarBase> Y(new imperative::VarBase(true, "Y"));
@@ -156,7 +156,7 @@ TEST(Benchmark, FluidMLPCPU) {
   platform::CPUPlace place;
   eager_test::InitEnv(place);
-  for (const std::string& mode : {"Accuracy", "Performance"}) {
+  for (const std::string mode : {"Accuracy", "Performance"}) {
     std::vector<float> x_src_data(MLP_M * MLP_N, MLP_X_VAL);
     std::vector<float> w_src_data(MLP_N * MLP_K, MLP_W_VAL);
     std::vector<float> b_src_data(MLP_K, MLP_B_VAL);
......
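All of the loop-variable changes in the two benchmark files above address the same GCC 12 diagnostic, -Wrange-loop-construct (fatal under -Werror): binding a const std::string& to the elements of a braced list of string literals is flagged, because each element is a const char* from which a std::string temporary must be materialized, so the reference quietly binds to a per-iteration temporary rather than avoiding a copy. GCC's suggested fix, used here, is to declare the loop variable as a non-reference type. A minimal sketch, independent of the Paddle sources:

    #include <string>

    int main() {
      // GCC 12 warns (wording approximate): loop variable 's' of type
      // 'const std::string&' binds to a temporary constructed from type
      // 'const char* const' [-Werror=range-loop-construct]
      // for (const std::string& s : {"Accuracy", "Performance"}) { ... }

      // Fixed form, as in the commit: take the element by value, so the
      // unavoidable const char* -> std::string conversion is spelled out.
      for (const std::string mode : {"Accuracy", "Performance"}) {
        (void)mode.size();  // use the element
      }
      return 0;
    }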
@@ -119,7 +119,7 @@ class DistributedLookupTableKernel : public framework::OpKernel<T> {
        auto *id_tensor = id_vars[i]->GetMutable<phi::DenseTensor>();
        auto *out_tensor = out_vars[i]->GetMutable<phi::DenseTensor>();
-       auto id_dims = id_tensor->dims();
+       auto id_dims = phi::vectorize<int64_t>(id_tensor->dims());
        out_tensor->Resize(phi::make_ddim({static_cast<int64_t>(id_dims[0]),
                                           static_cast<int64_t>(id_dims[1]),
                                           static_cast<int64_t>(emb_dim)}));
......
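The lookup-table kernel change swaps direct indexing of the phi::DDim returned by dims() for indexing into a std::vector<int64_t> produced by phi::vectorize. DDim keeps its extents in fixed-capacity inline storage with the rank known only at run time, a layout that GCC 12's stricter flow analysis can reject when elements are read by index in a -Werror build; copying the extents into a vector gives the compiler an object it can reason about. A minimal sketch of the shape of the fix, with a hypothetical Dims type standing in for phi::DDim:

    #include <array>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for phi::DDim: fixed-capacity inline storage,
    // rank known only at run time.
    struct Dims {
      std::array<int64_t, 9> d;
      int rank;
    };

    // Analogue of phi::vectorize<int64_t>(dims): copy the valid prefix into
    // an ordinary vector before indexing.
    std::vector<int64_t> vectorize(const Dims& dims) {
      return std::vector<int64_t>(dims.d.begin(), dims.d.begin() + dims.rank);
    }

    int main() {
      Dims dims{{2, 8}, 2};
      auto id_dims = vectorize(dims);
      return static_cast<int>(id_dims[0] + id_dims[1]);  // 10
    }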
@@ -51,7 +51,7 @@ TEST(ProfilerTest, TestHostTracer) {
   auto profiler_result = profiler->Stop();
   auto nodetree = profiler_result->GetNodeTrees();
   std::set<std::string> host_events;
-  for (const auto pair : nodetree->Traverse(true)) {
+  for (const auto& pair : nodetree->Traverse(true)) {
     for (const auto evt : pair.second) {
       host_events.insert(evt->Name());
     }
@@ -84,7 +84,7 @@ TEST(ProfilerTest, TestCudaTracer) {
   auto profiler_result = profiler->Stop();
   auto nodetree = profiler_result->GetNodeTrees();
   std::vector<std::string> runtime_events;
-  for (const auto pair : nodetree->Traverse(true)) {
+  for (const auto& pair : nodetree->Traverse(true)) {
     for (const auto host_node : pair.second) {
       for (auto runtime_node : host_node->GetRuntimeTraceEventNodes()) {
         runtime_events.push_back(runtime_node->Name());
......
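The profiler-test loops hit the other case -Wrange-loop-construct covers: a loop variable declared const auto (by value) copies every element out of the container, and GCC flags the copy as wasteful when the element type is non-trivial, here the pairs yielded by Traverse. Binding with const auto& is the standard fix. A minimal sketch with a plain map standing in for the node tree:

    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::map<std::string, std::vector<int>> tree{{"root", {1, 2, 3}}};

      // GCC 12 warns (wording approximate): loop variable 'pair' creates a
      // copy from type 'const std::pair<const std::string,
      // std::vector<int>>' [-Werror=range-loop-construct]
      // for (const auto pair : tree) { ... }

      // Fixed form, as in the commit: iterate by reference, no per-element
      // copy.
      long total = 0;
      for (const auto& pair : tree) {
        for (int v : pair.second) total += v;
      }
      return static_cast<int>(total);
    }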
diff --git a/googletest/src/gtest-death-test.cc b/googletest/src/gtest-death-test.cc
index 0908355..dfcfe34 100644
--- a/googletest/src/gtest-death-test.cc
+++ b/googletest/src/gtest-death-test.cc
@@ -1212,7 +1212,7 @@ static int ExecDeathTestChildMain(void* child_arg) {
static void StackLowerThanAddress(const void* ptr,
bool* result) GTEST_NO_INLINE_;
static void StackLowerThanAddress(const void* ptr, bool* result) {
- int dummy;
+ int dummy=0;
*result = (&dummy < ptr);
}
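The patched gtest function probes which way the stack grows by comparing the address of a local variable against an address from an enclosing frame; the value of dummy is never read. GCC 12's more aggressive uninitialized-use analysis nevertheless rejects the uninitialized local when googletest 1.8.1 is built with the project's warning flags, and giving it a value is behavior-neutral. A sketch of the probe as patched (simplified from gtest-death-test.cc):

    // Only the address of 'dummy' participates in the comparison.
    static void StackLowerThanAddress(const void* ptr, bool* result) {
      int dummy = 0;             // initialized solely to satisfy GCC 12
      *result = (&dummy < ptr);
    }

    // Caller: if the callee's local sits below a local of this frame, the
    // stack grows downward.
    static bool StackGrowsDown() {
      int local = 0;
      bool result = false;
      StackLowerThanAddress(&local, &result);
      return result;
    }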
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <memory>
 #include "gtest/gtest.h"
 #include "paddle/phi/core/utils/type_registry.h"
......
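The final hunk is a common flavor of GCC 12 breakage: recent libstdc++ releases pruned transitive includes from their own headers, so a translation unit that uses facilities from <memory> (std::shared_ptr and friends) but previously received the header indirectly, presumably via gtest or the phi headers here, stops compiling. Including <memory> directly is the portable fix.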