未验证 提交 17e4be21 编写于 作者: C cyberslack_lee 提交者: GitHub

[clang-tidy] No.34,36 enable performance-noexcept-move-constructor, modernize-use-transparent-functors

[clang-tidy] No.34,36 enable performance-noexcept-move-constructor,modernize-use-transparent-functors (#56261)

* fix

* fix

* CI

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* CI

* fix

* CI
上级 962f67d2
......@@ -186,7 +186,7 @@ modernize-use-equals-default,
-modernize-use-noexcept,
modernize-use-nullptr,
modernize-use-override,
-modernize-use-transparent-functors,
modernize-use-transparent-functors,
-modernize-use-uncaught-exceptions,
performance-faster-string-find,
-performance-for-range-copy,
......@@ -197,7 +197,7 @@ performance-inefficient-string-concatenation,
-performance-move-const-arg,
-performance-move-constructor-init,
-performance-no-automatic-move,
-performance-noexcept-move-constructor,
performance-noexcept-move-constructor,
-performance-trivially-destructible,
-performance-type-promotion-in-math-fn,
-performance-unnecessary-copy-initialization,
......
......@@ -78,7 +78,8 @@ DistModelDataBuf& DistModelDataBuf::operator=(const DistModelDataBuf& other) {
return *this;
}
DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
DistModelDataBuf& DistModelDataBuf::operator=(
DistModelDataBuf&& other) noexcept {
data_ = other.data_;
memory_owned_ = other.memory_owned_;
length_ = other.length_;
......@@ -88,7 +89,7 @@ DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
return *this;
}
DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other)
DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other) noexcept
: data_(other.data_),
length_(other.length_),
memory_owned_(other.memory_owned_) {
......
......@@ -61,8 +61,8 @@ class DistModelDataBuf {
void Resize(size_t length);
DistModelDataBuf& operator=(const DistModelDataBuf& other);
DistModelDataBuf& operator=(DistModelDataBuf&& other);
DistModelDataBuf(DistModelDataBuf&& other);
DistModelDataBuf& operator=(DistModelDataBuf&& other) noexcept;
DistModelDataBuf(DistModelDataBuf&& other) noexcept;
DistModelDataBuf(const DistModelDataBuf& other);
private:
......
......@@ -70,7 +70,7 @@ static int64_t GetMemorySize(
std::accumulate(dims.begin(),
dims.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>());
std::multiplies<int64_t>()); // NOLINT
}
// Split all variables in the graph into phi::DenseTensor and
......
......@@ -121,7 +121,7 @@ int64_t MemoryReusePass::GetMemorySize(const details::VarHandle &var) const {
return std::accumulate(shapes.begin(),
shapes.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) *
std::multiplies<>()) *
sizeof_dtype;
}
......
......@@ -354,8 +354,9 @@ void ConvBiasFusePass::FuseConvBias(ir::Graph* graph,
"must have same shape, but they are different: %s, %s.",
conv_bias_tensor->dims(),
eltwise_bias_tensor->dims()));
*conv_bias_tensor = tensor_apply_eltwise(
*conv_bias_tensor, *eltwise_bias_tensor, std::plus<float>());
*conv_bias_tensor = tensor_apply_eltwise(*conv_bias_tensor,
*eltwise_bias_tensor,
std::plus<float>()); // NOLINT
conv->Op()->SetOutput("Output",
std::vector<std::string>({eltwise_out->Name()}));
......
......@@ -141,14 +141,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape1_shape[i] = std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape1_shape[i] =
std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
......@@ -160,14 +161,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape2_shape[i] = std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape2_shape[i] =
std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
......
......@@ -141,14 +141,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape1_shape[i] = std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape1_shape[i] =
std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
......@@ -160,14 +161,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape2_shape[i] = std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape2_shape[i] =
std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
......
......@@ -97,7 +97,7 @@ void MemoryOptimizePass::CollectLifeCycle(
auto var_bytes = std::accumulate(in_shape.begin(),
in_shape.end(),
(int64_t)1,
std::multiplies<int64_t>());
std::multiplies<>());
persis_byte +=
paddle::framework::SizeOfType(node->Var()->GetDataType()) *
var_bytes;
......@@ -183,8 +183,8 @@ void MemoryOptimizePass::CollectVarMemorySize(
if (v < 0) v = fake_batch_size;
}
int size = std::accumulate(
shape.begin(), shape.end(), 1, std::multiplies<int>());
int size =
std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
(*space_table)[node->Var()->Name()] =
size * paddle::framework::SizeOfType(node->Var()->GetDataType());
}
......
......@@ -40,7 +40,7 @@ int PaddleDtypeSize(PaddleDType dtype) {
}
}
PaddleBuf::PaddleBuf(PaddleBuf &&other)
PaddleBuf::PaddleBuf(PaddleBuf &&other) noexcept
: data_(other.data_),
length_(other.length_),
memory_owned_(other.memory_owned_) {
......@@ -74,7 +74,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
return *this;
}
PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) {
PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) noexcept {
// only the buffer with external memory can be copied
data_ = other.data_;
length_ = other.length_;
......
......@@ -137,9 +137,9 @@ class PD_INFER_DECL PaddleBuf {
~PaddleBuf() { Free(); }
PaddleBuf& operator=(const PaddleBuf&);
PaddleBuf& operator=(PaddleBuf&&);
PaddleBuf& operator=(PaddleBuf&&) noexcept;
PaddleBuf() = default;
PaddleBuf(PaddleBuf&& other);
PaddleBuf(PaddleBuf&& other) noexcept;
private:
void Free();
......
......@@ -21,10 +21,11 @@ namespace operators {
// Shape of bitmask
static framework::DDim GetBitmaskDims(std::vector<int> out_shape) {
int c = out_shape.back();
int64_t nhw =
std::accumulate(
out_shape.begin(), out_shape.end(), 1, std::multiplies<int>()) /
c;
int64_t nhw = std::accumulate(out_shape.begin(),
out_shape.end(),
1,
std::multiplies<int>()) / // NOLINT
c;
int32_t c_int32_elems = ((c + 63) & ~63) / 32;
int32_t nhw_int32_elems = ((nhw + 31) & ~31);
std::vector<int> bitmask_shape = {nhw_int32_elems, c_int32_elems, 1};
......
......@@ -18,13 +18,13 @@ InterfaceValue::~InterfaceValue() {
if (model_) free(model_);
}
InterfaceValue::InterfaceValue(InterfaceValue&& val) {
InterfaceValue::InterfaceValue(InterfaceValue&& val) noexcept {
type_id_ = val.type_id_;
model_ = val.model_;
val.model_ = nullptr;
}
InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) {
InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept {
swap(std::move(val));
return *this;
}
......
......@@ -43,9 +43,9 @@ class IR_API InterfaceValue {
InterfaceValue() = default;
explicit InterfaceValue(TypeId type_id) : type_id_(type_id) {}
InterfaceValue(const InterfaceValue &) = delete;
InterfaceValue(InterfaceValue &&);
InterfaceValue(InterfaceValue &&) noexcept;
InterfaceValue &operator=(const InterfaceValue &) = delete;
InterfaceValue &operator=(InterfaceValue &&);
InterfaceValue &operator=(InterfaceValue &&) noexcept;
~InterfaceValue();
void swap(InterfaceValue &&val) {
using std::swap;
......
......@@ -96,7 +96,7 @@ class PADDLE_API Tensor final {
/**
* @brief Construct a new Tensor object by move
*/
Tensor(Tensor&&) = default;
Tensor(Tensor&&) noexcept = default;
/**
* @brief Construct a new Tensor object by a TensorBase pointer
......@@ -522,7 +522,7 @@ class PADDLE_API Tensor final {
* @param x
* @return Tensor&
*/
Tensor& operator=(Tensor&& x) &;
Tensor& operator=(Tensor&& x) & noexcept;
/**
* @brief Tensor operants
......
......@@ -399,7 +399,7 @@ void Tensor::reset() {
Tensor &Tensor::operator=(const Tensor &x) & = default;
Tensor &Tensor::operator=(Tensor &&x) & {
Tensor &Tensor::operator=(Tensor &&x) &noexcept {
impl_ = std::move(x.impl_);
autograd_meta_ = std::move(x.autograd_meta_);
name_ = std::move(x.name_);
......
......@@ -65,9 +65,9 @@ CPUContext::CPUContext(const Place& place)
CPUContext::~CPUContext() = default;
CPUContext::CPUContext(CPUContext&&) = default;
CPUContext::CPUContext(CPUContext&&) = default; // NOLINT
CPUContext& CPUContext::operator=(CPUContext&&) = default;
CPUContext& CPUContext::operator=(CPUContext&&) = default; // NOLINT
Eigen::DefaultDevice* CPUContext::eigen_device() const {
return impl_->GetEigenDevice();
......
......@@ -814,9 +814,9 @@ struct GPUContext::Impl {
thread_local AttributeMap GPUContext::Impl::dnn_attrs_ = {};
GPUContext::GPUContext(GPUContext&&) = default;
GPUContext::GPUContext(GPUContext&&) = default; // NOLINT
GPUContext& GPUContext::operator=(GPUContext&&) = default;
GPUContext& GPUContext::operator=(GPUContext&&) = default; // NOLINT
GPUContext::GPUContext(const GPUPlace& place, bool init, int stream_priority)
: DeviceContext(), impl_(std::make_unique<Impl>(place)) {
......
......@@ -79,7 +79,7 @@ DenseTensor& DenseTensor::operator=(const DenseTensor& other) {
return *this;
}
DenseTensor& DenseTensor::operator=(DenseTensor&& other) {
DenseTensor& DenseTensor::operator=(DenseTensor&& other) noexcept {
meta_ = std::move(other.meta_);
std::swap(holder_, other.holder_);
storage_properties_ = std::move(other.storage_properties_);
......
......@@ -69,7 +69,7 @@ class DenseTensor : public TensorBase,
/// \brief DenseTensor shallow copy assignment.
DenseTensor& operator=(const DenseTensor& other);
DenseTensor& operator=(DenseTensor&& other);
DenseTensor& operator=(DenseTensor&& other) noexcept;
DenseTensor();
......
......@@ -312,12 +312,12 @@ DeviceContext::DeviceContext(const DeviceContext& other) {
#endif
}
DeviceContext::DeviceContext(DeviceContext&& other) {
DeviceContext::DeviceContext(DeviceContext&& other) noexcept {
impl_ = std::move(other.impl_);
}
DeviceContext& DeviceContext::operator=(DeviceContext&& other) = default;
DeviceContext& DeviceContext::operator=(DeviceContext&& other) noexcept =
default;
DeviceContext::~DeviceContext() = default;
void DeviceContext::SetAllocator(const Allocator* allocator) {
......
......@@ -48,12 +48,12 @@ class PADDLE_API DeviceContext {
/**
* @brief Move construct.
*/
DeviceContext(DeviceContext&&);
DeviceContext(DeviceContext&&) noexcept;
/**
* @brief Move assign operator.
*/
DeviceContext& operator=(DeviceContext&&);
DeviceContext& operator=(DeviceContext&&) noexcept;
/**
* @brief Default destruct.
......
......@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
}
SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) { // NOLINT
SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) noexcept {
this->non_zero_elements_ = other.non_zero_elements_;
this->non_zero_indices_ = other.non_zero_indices_;
this->coalesced_ = other.coalesced_;
......
......@@ -55,7 +55,7 @@ class SparseCooTensor : public TensorBase,
SparseCooTensor(const SparseCooTensor& other);
/// \brief move constructor
SparseCooTensor(SparseCooTensor&& other);
SparseCooTensor(SparseCooTensor&& other) noexcept;
/// \brief SparseCooTensor shallow copy assignment.
SparseCooTensor& operator=(const SparseCooTensor& other);
......
......@@ -49,7 +49,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
return *this;
}
StringTensor& StringTensor::operator=(StringTensor&& other) { // NOLINT
StringTensor& StringTensor::operator=(StringTensor&& other) noexcept {
meta_ = std::move(other.meta_);
std::swap(holder_, other.holder_);
return *this;
......
......@@ -56,7 +56,7 @@ class StringTensor : public TensorBase,
/// \brief StringTensor shallow copy assignment.
StringTensor& operator=(const StringTensor& other);
StringTensor& operator=(StringTensor&& other);
StringTensor& operator=(StringTensor&& other) noexcept;
/// \brief Destroy the tensor object and release exclusive resources.
virtual ~StringTensor() = default;
......
......@@ -63,7 +63,7 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
auto* label_ptr = label.data<T>();
// get unique positive class center by ascending
std::set<T, std::less<T>> unique_label;
std::set<T, std::less<T>> unique_label; // NOLINT
for (int64_t i = 0; i < numel; ++i) {
unique_label.insert(label_ptr[i]);
}
......
......@@ -63,7 +63,7 @@ void SampleWeightedNeighbors(
bool return_eids) {
std::priority_queue<phi::GraphWeightedNode<T>,
std::vector<phi::GraphWeightedNode<T>>,
std::greater<phi::GraphWeightedNode<T>>>
std::greater<phi::GraphWeightedNode<T>>> // NOLINT
min_heap;
for (size_t i = 0; i < out_src.size(); i++) {
float weight_key = log2(dice_distribution(rng)) * (1 / out_weight[i]);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册