Unverified commit 91096ae2, authored by Wang Xin, committed by GitHub

remove boost compiler flags in flags.cmake (#47468)

Parent: 1e2a371c
@@ -170,13 +170,7 @@ if(NOT WIN32)
   if(NOT APPLE)
     if((${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 8.0) OR (WITH_ROCM))
       set(COMMON_FLAGS
-          ${COMMON_FLAGS}
-          -Wno-format-truncation # Warning in boost gcc 8.2
-          -Wno-error=parentheses # Warning in boost gcc 8.2
-          -Wno-error=catch-value # Warning in boost gcc 8.2
-          -Wno-error=nonnull-compare # Warning in boost gcc 8.2
-          -Wno-error=address # Warning in boost gcc 8.2
-          -Wno-ignored-qualifiers # Warning in boost gcc 8.2
+          ${COMMON_FLAGS} -Wno-ignored-qualifiers # Warning in Paddle-Lite
           -Wno-ignored-attributes # Warning in Eigen gcc 8.3
           -Wno-parentheses # Warning in Eigen gcc 8.3
       )
......
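With Boost removed from Paddle's dependencies, the six gcc 8.2 suppressions above have nothing left to silence; only -Wno-ignored-qualifiers is kept, now attributed to Paddle-Lite. As a minimal sketch (illustrative code, not from this repository) of the diagnostic that flag hides, gcc 8+ warns when a cv-qualifier on a scalar type is meaningless:

    // example.cc -- compile with: g++ -Wignored-qualifiers -c example.cc
    // gcc: "warning: type qualifiers ignored on function return type"
    const int answer() { return 42; }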
@@ -68,7 +68,7 @@ class DGCOpKernel : public framework::OpKernel<T> {
     // nranks
     auto nranks_tensor = ctx.Input<phi::DenseTensor>("nranks");
-    const int nranks = static_cast<const int>(*nranks_tensor->data<float>());
+    const int nranks = static_cast<int>(*nranks_tensor->data<float>());
     PADDLE_ENFORCE_GT(nranks,
                       1,
                       platform::errors::PreconditionNotMet(
......
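Every hunk below applies the same cleanup: the result of a cast to a non-class type is a prvalue, so a top-level const on the target type is discarded and static_cast<const int> means exactly static_cast<int>; gcc 8+ reports the redundant qualifier under -Wignored-qualifiers. A minimal standalone sketch (illustrative values, not Paddle code):

    #include <cstdio>

    int main() {
      float nranks_value = 4.0f;
      // Writing static_cast<const int> here would change nothing;
      // gcc warns "type qualifiers ignored on cast result type".
      const int nranks = static_cast<int>(nranks_value);
      std::printf("%d\n", nranks);  // prints 4
      return 0;
    }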
@@ -1610,9 +1610,9 @@ MLURNNDesc::~MLURNNDesc() {
     const float alpha1_float,
     const float alpha2_float,
     const float beta_float) {
-  const int alpha1_int = static_cast<const int>(alpha1_float);
-  const int alpha2_int = static_cast<const int>(alpha2_float);
-  const int beta_int = static_cast<const int>(beta_float);
+  const int alpha1_int = static_cast<int>(alpha1_float);
+  const int alpha2_int = static_cast<int>(alpha2_float);
+  const int beta_int = static_cast<int>(beta_float);
   const void* alpha1_ptr = static_cast<const void*>(&alpha1_float);
   const void* alpha2_ptr = static_cast<const void*>(&alpha2_float);
......
@@ -39,7 +39,7 @@ class DGCMomentumKernel : public framework::OpKernel<T> {
     // nranks
     auto nranks_tensor = context.Input<phi::DenseTensor>("nranks");
-    const int nranks = static_cast<const int>(*nranks_tensor->data<float>());
+    const int nranks = static_cast<int>(*nranks_tensor->data<float>());
     PADDLE_ENFORCE_GT(
         nranks,
         1,
......
@@ -33,7 +33,7 @@ class XPULogsumexpKernel : public framework::OpKernel<T> {
     const auto& input_dim_size = input->dims().size();
     // The dims has full dim, set the reduce_all is True
-    reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+    reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
     const T* input_data = input->data<T>();
     T* output_data = output->mutable_data<T>(context.GetPlace());
......
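This hunk and the two logsumexp kernels further down share a surrounding idiom: axis.size() is an unsigned size_t while the tensor rank is a signed int, so the (now unqualified) cast keeps the equality comparison well-typed. A small sketch of that pattern, with illustrative names rather than Paddle's real signatures:

    #include <vector>

    // Reducing over every dimension is the same as reduce_all = true.
    bool covers_all_dims(const std::vector<int>& axis, int input_dim_size) {
      return static_cast<int>(axis.size()) == input_dim_size;
    }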
@@ -135,7 +135,7 @@ void LayerNormKernel(const Context& dev_ctx,
         scale ? scale->data<T>() : nullptr,
         bias ? bias->data<T>() : nullptr,
         static_cast<int>(left),
-        static_cast<const float>(epsilon),
+        static_cast<float>(epsilon),
         right);
 #endif
 }
......
@@ -61,7 +61,7 @@ void LogsumexpGradKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(in_grad);
   const auto input_dim_size = in.dims().size();
-  reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+  reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
   if (reduce_all) {
     auto x = phi::EigenVector<T>::Flatten(in);
......
@@ -71,7 +71,7 @@ void LogsumexpKernel(const Context& dev_ctx,
   const auto& input_dim_size = x.dims().size();
   // The dims has full dim, set the reduce_all is True
-  reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+  reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
   if (reduce_all) {
     // Flatten and reduce 1-D tensor
......