Unverified · Commit 17e4be21 authored by cyberslack_lee, committed by GitHub

[clang-tidy] No.34,36 enable performance-noexcept-move-constructor,modernize-use-transparent-functors (#56261)

* fix

* fix

* CI

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* CI

* fix

* CI
Parent 962f67d2
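Before the diffs, a quick summary of the two checks being enabled. `performance-noexcept-move-constructor` flags move constructors and move assignment operators that are not declared `noexcept`; `modernize-use-transparent-functors` prefers the transparent functor specializations (`std::multiplies<>`, `std::less<>`, ...) that deduce their operand types. A minimal sketch of both fixes, using a hypothetical `Buf` type that is not part of this patch:

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Hypothetical type illustrating the performance-noexcept-move-constructor fix.
struct Buf {
  Buf() = default;
  Buf(Buf&& other) noexcept  // noexcept lets containers move rather than copy
      : data_(other.data_), size_(other.size_) {
    other.data_ = nullptr;
    other.size_ = 0;
  }
  Buf& operator=(Buf&& other) noexcept {
    data_ = other.data_;
    size_ = other.size_;
    other.data_ = nullptr;
    other.size_ = 0;
    return *this;
  }
  void* data_ = nullptr;
  size_t size_ = 0;
};

// modernize-use-transparent-functors: let the operand types be deduced
// instead of spelling them out in the functor.
int64_t Product(const std::vector<int64_t>& dims) {
  return std::accumulate(
      dims.begin(), dims.end(), static_cast<int64_t>(1), std::multiplies<>());
}
```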
@@ -186,7 +186,7 @@ modernize-use-equals-default,
 -modernize-use-noexcept,
 modernize-use-nullptr,
 modernize-use-override,
--modernize-use-transparent-functors,
+modernize-use-transparent-functors,
 -modernize-use-uncaught-exceptions,
 performance-faster-string-find,
 -performance-for-range-copy,
@@ -197,7 +197,7 @@ performance-inefficient-string-concatenation,
 -performance-move-const-arg,
 -performance-move-constructor-init,
 -performance-no-automatic-move,
--performance-noexcept-move-constructor,
+performance-noexcept-move-constructor,
 -performance-trivially-destructible,
 -performance-type-promotion-in-math-fn,
 -performance-unnecessary-copy-initialization,
......
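The practical motivation for the `performance-noexcept-move-constructor` entries above: `std::vector` reallocation goes through `std::move_if_noexcept`, so element types whose move constructor may throw get copied instead of moved. A small self-contained illustration (not Paddle code):

```cpp
#include <iostream>
#include <vector>

struct Loud {
  Loud() = default;
  Loud(const Loud&) { std::cout << "copy\n"; }
  Loud(Loud&&) noexcept { std::cout << "move\n"; }  // drop noexcept and the vector copies instead
};

int main() {
  std::vector<Loud> v;
  v.reserve(1);
  v.emplace_back();
  v.emplace_back();  // reallocation relocates the first element: prints "move"
}
```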
@@ -78,7 +78,8 @@ DistModelDataBuf& DistModelDataBuf::operator=(const DistModelDataBuf& other) {
   return *this;
 }
 
-DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
+DistModelDataBuf& DistModelDataBuf::operator=(
+    DistModelDataBuf&& other) noexcept {
   data_ = other.data_;
   memory_owned_ = other.memory_owned_;
   length_ = other.length_;
@@ -88,7 +89,7 @@ DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
   return *this;
 }
 
-DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other)
+DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other) noexcept
     : data_(other.data_),
       length_(other.length_),
       memory_owned_(other.memory_owned_) {
......
@@ -61,8 +61,8 @@ class DistModelDataBuf {
   void Resize(size_t length);
 
   DistModelDataBuf& operator=(const DistModelDataBuf& other);
-  DistModelDataBuf& operator=(DistModelDataBuf&& other);
-  DistModelDataBuf(DistModelDataBuf&& other);
+  DistModelDataBuf& operator=(DistModelDataBuf&& other) noexcept;
+  DistModelDataBuf(DistModelDataBuf&& other) noexcept;
   DistModelDataBuf(const DistModelDataBuf& other);
 
  private:
......
@@ -70,7 +70,7 @@ static int64_t GetMemorySize(
          std::accumulate(dims.begin(),
                          dims.end(),
                          static_cast<int64_t>(1),
-                         std::multiplies<int64_t>());
+                         std::multiplies<int64_t>());  // NOLINT
 }
 
 // Split all variables in the graph into phi::DenseTensor and
......
@@ -121,7 +121,7 @@ int64_t MemoryReusePass::GetMemorySize(const details::VarHandle &var) const {
   return std::accumulate(shapes.begin(),
                          shapes.end(),
                          static_cast<int64_t>(1),
-                         std::multiplies<int64_t>()) *
+                         std::multiplies<>()) *
          sizeof_dtype;
 }
......
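The two hunks above show the two possible resolutions for `modernize-use-transparent-functors`: switch to the transparent `std::multiplies<>` where the deduced types are clearly safe (the `int64_t` init value pins the accumulator type), or keep the explicit specialization and silence the check with `// NOLINT`. A sketch of why the transparent form computes the same result here:

```cpp
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> shape = {2, 3, 4};
  // The init argument is int64_t, so the transparent functor deduces
  // int64_t * int -> int64_t at every step; no narrowing occurs.
  int64_t n = std::accumulate(
      shape.begin(), shape.end(), static_cast<int64_t>(1), std::multiplies<>());
  return n == 24 ? 0 : 1;
}
```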
@@ -354,8 +354,9 @@ void ConvBiasFusePass::FuseConvBias(ir::Graph* graph,
             "must have same shape, but they are different: %s, %s.",
             conv_bias_tensor->dims(),
             eltwise_bias_tensor->dims()));
-    *conv_bias_tensor = tensor_apply_eltwise(
-        *conv_bias_tensor, *eltwise_bias_tensor, std::plus<float>());
+    *conv_bias_tensor = tensor_apply_eltwise(*conv_bias_tensor,
+                                             *eltwise_bias_tensor,
+                                             std::plus<float>());  // NOLINT
     conv->Op()->SetOutput("Output",
                           std::vector<std::string>({eltwise_out->Name()}));
......
@@ -141,14 +141,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
     if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
       // there is no sufficient info
       if (!all_positive) return;
-      reshape1_shape[i] = std::accumulate(x_shape1.begin(),
-                                          x_shape1.end(),
-                                          static_cast<int64_t>(1),
-                                          std::multiplies<int64_t>()) /
-                          std::accumulate(reshape1_shape.begin(),
-                                          reshape1_shape.end(),
-                                          static_cast<int64_t>(-1),
-                                          std::multiplies<int64_t>());
+      reshape1_shape[i] =
+          std::accumulate(x_shape1.begin(),
+                          x_shape1.end(),
+                          static_cast<int64_t>(1),
+                          std::multiplies<int64_t>()) /  // NOLINT
+          std::accumulate(reshape1_shape.begin(),
+                          reshape1_shape.end(),
+                          static_cast<int64_t>(-1),
+                          std::multiplies<int64_t>());  // NOLINT
       break;
     }
   }
@@ -160,14 +161,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
     if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
      // there is no sufficient info
       if (!all_positive) return;
-      reshape2_shape[i] = std::accumulate(x_shape2.begin(),
-                                          x_shape2.end(),
-                                          static_cast<int64_t>(1),
-                                          std::multiplies<int64_t>()) /
-                          std::accumulate(reshape2_shape.begin(),
-                                          reshape2_shape.end(),
-                                          static_cast<int64_t>(-1),
-                                          std::multiplies<int64_t>());
+      reshape2_shape[i] =
+          std::accumulate(x_shape2.begin(),
+                          x_shape2.end(),
+                          static_cast<int64_t>(1),
+                          std::multiplies<int64_t>()) /  // NOLINT
+          std::accumulate(reshape2_shape.begin(),
+                          reshape2_shape.end(),
+                          static_cast<int64_t>(-1),
+                          std::multiplies<int64_t>());  // NOLINT
       break;
     }
   }
......
@@ -141,14 +141,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
     if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
       // there is no sufficient info
       if (!all_positive) return;
-      reshape1_shape[i] = std::accumulate(x_shape1.begin(),
-                                          x_shape1.end(),
-                                          static_cast<int64_t>(1),
-                                          std::multiplies<int64_t>()) /
-                          std::accumulate(reshape1_shape.begin(),
-                                          reshape1_shape.end(),
-                                          static_cast<int64_t>(-1),
-                                          std::multiplies<int64_t>());
+      reshape1_shape[i] =
+          std::accumulate(x_shape1.begin(),
+                          x_shape1.end(),
+                          static_cast<int64_t>(1),
+                          std::multiplies<int64_t>()) /  // NOLINT
+          std::accumulate(reshape1_shape.begin(),
+                          reshape1_shape.end(),
+                          static_cast<int64_t>(-1),
+                          std::multiplies<int64_t>());  // NOLINT
       break;
     }
   }
@@ -160,14 +161,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
     if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
       // there is no sufficient info
       if (!all_positive) return;
-      reshape2_shape[i] = std::accumulate(x_shape2.begin(),
-                                          x_shape2.end(),
-                                          static_cast<int64_t>(1),
-                                          std::multiplies<int64_t>()) /
-                          std::accumulate(reshape2_shape.begin(),
-                                          reshape2_shape.end(),
-                                          static_cast<int64_t>(-1),
-                                          std::multiplies<int64_t>());
+      reshape2_shape[i] =
+          std::accumulate(x_shape2.begin(),
+                          x_shape2.end(),
+                          static_cast<int64_t>(1),
+                          std::multiplies<int64_t>()) /  // NOLINT
+          std::accumulate(reshape2_shape.begin(),
+                          reshape2_shape.end(),
+                          static_cast<int64_t>(-1),
+                          std::multiplies<int64_t>());  // NOLINT
      break;
     }
   }
......
@@ -97,7 +97,7 @@ void MemoryOptimizePass::CollectLifeCycle(
         auto var_bytes = std::accumulate(in_shape.begin(),
                                          in_shape.end(),
                                          (int64_t)1,
-                                         std::multiplies<int64_t>());
+                                         std::multiplies<>());
         persis_byte +=
             paddle::framework::SizeOfType(node->Var()->GetDataType()) *
             var_bytes;
@@ -183,8 +183,8 @@ void MemoryOptimizePass::CollectVarMemorySize(
       if (v < 0) v = fake_batch_size;
     }
-    int size = std::accumulate(
-        shape.begin(), shape.end(), 1, std::multiplies<int>());
+    int size =
+        std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
     (*space_table)[node->Var()->Name()] =
         size * paddle::framework::SizeOfType(node->Var()->GetDataType());
   }
......
@@ -40,7 +40,7 @@ int PaddleDtypeSize(PaddleDType dtype) {
   }
 }
 
-PaddleBuf::PaddleBuf(PaddleBuf &&other)
+PaddleBuf::PaddleBuf(PaddleBuf &&other) noexcept
     : data_(other.data_),
       length_(other.length_),
       memory_owned_(other.memory_owned_) {
@@ -74,7 +74,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
   return *this;
 }
 
-PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) {
+PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) noexcept {
   // only the buffer with external memory can be copied
   data_ = other.data_;
   length_ = other.length_;
......
@@ -137,9 +137,9 @@ class PD_INFER_DECL PaddleBuf {
   ~PaddleBuf() { Free(); }
 
   PaddleBuf& operator=(const PaddleBuf&);
-  PaddleBuf& operator=(PaddleBuf&&);
+  PaddleBuf& operator=(PaddleBuf&&) noexcept;
   PaddleBuf() = default;
-  PaddleBuf(PaddleBuf&& other);
+  PaddleBuf(PaddleBuf&& other) noexcept;
 
  private:
   void Free();
......
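Note that in each pair of files above the `noexcept` specifier lands on both the in-class declaration and the out-of-line definition; since C++17 the exception specification is part of the function type, and redeclarations with mismatched `noexcept` do not compile. A minimal illustration with a hypothetical `W`:

```cpp
#include <utility>

struct W {
  W() = default;
  W(W&&) noexcept;  // the declaration carries noexcept ...
};

W::W(W&&) noexcept = default;  // ... and the out-of-line definition must repeat it

int main() {
  W a;
  W b(std::move(a));  // uses the noexcept move constructor
}
```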
@@ -21,10 +21,11 @@ namespace operators {
 // Shape of bitmask
 static framework::DDim GetBitmaskDims(std::vector<int> out_shape) {
   int c = out_shape.back();
-  int64_t nhw =
-      std::accumulate(
-          out_shape.begin(), out_shape.end(), 1, std::multiplies<int>()) /
-      c;
+  int64_t nhw = std::accumulate(out_shape.begin(),
+                                out_shape.end(),
+                                1,
+                                std::multiplies<int>()) /  // NOLINT
+                c;
   int32_t c_int32_elems = ((c + 63) & ~63) / 32;
   int32_t nhw_int32_elems = ((nhw + 31) & ~31);
   std::vector<int> bitmask_shape = {nhw_int32_elems, c_int32_elems, 1};
......
@@ -18,13 +18,13 @@ InterfaceValue::~InterfaceValue() {
   if (model_) free(model_);
 }
 
-InterfaceValue::InterfaceValue(InterfaceValue&& val) {
+InterfaceValue::InterfaceValue(InterfaceValue&& val) noexcept {
   type_id_ = val.type_id_;
   model_ = val.model_;
   val.model_ = nullptr;
 }
 
-InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) {
+InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept {
   swap(std::move(val));
   return *this;
 }
......
@@ -43,9 +43,9 @@ class IR_API InterfaceValue {
   InterfaceValue() = default;
   explicit InterfaceValue(TypeId type_id) : type_id_(type_id) {}
   InterfaceValue(const InterfaceValue &) = delete;
-  InterfaceValue(InterfaceValue &&);
+  InterfaceValue(InterfaceValue &&) noexcept;
   InterfaceValue &operator=(const InterfaceValue &) = delete;
-  InterfaceValue &operator=(InterfaceValue &&);
+  InterfaceValue &operator=(InterfaceValue &&) noexcept;
   ~InterfaceValue();
   void swap(InterfaceValue &&val) {
     using std::swap;
......
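`InterfaceValue` implements move assignment by delegating to its own `swap`, a common idiom that is naturally `noexcept` as long as the member swaps are. A compact sketch of the pattern with a hypothetical `Handle` type:

```cpp
#include <utility>

class Handle {
 public:
  Handle() = default;
  Handle(Handle&& other) noexcept : ptr_(other.ptr_) { other.ptr_ = nullptr; }
  Handle& operator=(Handle&& other) noexcept {
    std::swap(ptr_, other.ptr_);  // the old resource is released by other's destructor
    return *this;
  }
  ~Handle() { delete ptr_; }

 private:
  int* ptr_ = nullptr;
};

int main() {
  Handle a, b;
  a = std::move(b);
}
```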
@@ -96,7 +96,7 @@ class PADDLE_API Tensor final {
   /**
    * @brief Construct a new Tensor object by move
    */
-  Tensor(Tensor&&) = default;
+  Tensor(Tensor&&) noexcept = default;
 
   /**
    * @brief Construct a new Tensor object by a TensorBase pointer
@@ -522,7 +522,7 @@ class PADDLE_API Tensor final {
    * @param x
    * @return Tensor&
    */
-  Tensor& operator=(Tensor&& x) &;
+  Tensor& operator=(Tensor&& x) & noexcept;
 
   /**
    * @brief Tensor operants
@@ -399,7 +399,7 @@ void Tensor::reset() {
 
 Tensor &Tensor::operator=(const Tensor &x) & = default;
 
-Tensor &Tensor::operator=(Tensor &&x) & {
+Tensor &Tensor::operator=(Tensor &&x) &noexcept {
   impl_ = std::move(x.impl_);
   autograd_meta_ = std::move(x.autograd_meta_);
   name_ = std::move(x.name_);
......
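`Tensor`'s move assignment is also ref-qualified (`&`), so it can only be invoked on lvalues; in the grammar the ref-qualifier precedes the exception specification. A minimal sketch with a hypothetical type `T2`:

```cpp
#include <utility>

struct T2 {
  T2& operator=(T2&& other) & noexcept {  // ref-qualifier comes before noexcept
    v = other.v;
    return *this;
  }
  int v = 0;
};

int main() {
  T2 a, b;
  a = std::move(b);       // OK: a is an lvalue
  // T2{} = std::move(b); // would not compile: assignment requires an lvalue
}
```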
@@ -65,9 +65,9 @@ CPUContext::CPUContext(const Place& place)
 
 CPUContext::~CPUContext() = default;
 
-CPUContext::CPUContext(CPUContext&&) = default;
+CPUContext::CPUContext(CPUContext&&) = default;  // NOLINT
 
-CPUContext& CPUContext::operator=(CPUContext&&) = default;
+CPUContext& CPUContext::operator=(CPUContext&&) = default;  // NOLINT
 
 Eigen::DefaultDevice* CPUContext::eigen_device() const {
   return impl_->GetEigenDevice();
......
@@ -814,9 +814,9 @@ struct GPUContext::Impl {
 
 thread_local AttributeMap GPUContext::Impl::dnn_attrs_ = {};
 
-GPUContext::GPUContext(GPUContext&&) = default;
+GPUContext::GPUContext(GPUContext&&) = default;  // NOLINT
 
-GPUContext& GPUContext::operator=(GPUContext&&) = default;
+GPUContext& GPUContext::operator=(GPUContext&&) = default;  // NOLINT
 
 GPUContext::GPUContext(const GPUPlace& place, bool init, int stream_priority)
     : DeviceContext(), impl_(std::make_unique<Impl>(place)) {
......
@@ -79,7 +79,7 @@ DenseTensor& DenseTensor::operator=(const DenseTensor& other) {
   return *this;
 }
 
-DenseTensor& DenseTensor::operator=(DenseTensor&& other) {
+DenseTensor& DenseTensor::operator=(DenseTensor&& other) noexcept {
   meta_ = std::move(other.meta_);
   std::swap(holder_, other.holder_);
   storage_properties_ = std::move(other.storage_properties_);
......
@@ -69,7 +69,7 @@ class DenseTensor : public TensorBase,
   /// \brief DenseTensor shallow copy assignment.
   DenseTensor& operator=(const DenseTensor& other);
 
-  DenseTensor& operator=(DenseTensor&& other);
+  DenseTensor& operator=(DenseTensor&& other) noexcept;
 
   DenseTensor();
......
@@ -312,12 +312,12 @@ DeviceContext::DeviceContext(const DeviceContext& other) {
 #endif
 }
 
-DeviceContext::DeviceContext(DeviceContext&& other) {
+DeviceContext::DeviceContext(DeviceContext&& other) noexcept {
   impl_ = std::move(other.impl_);
 }
 
-DeviceContext& DeviceContext::operator=(DeviceContext&& other) = default;
+DeviceContext& DeviceContext::operator=(DeviceContext&& other) noexcept =
+    default;
 
 DeviceContext::~DeviceContext() = default;
 
 void DeviceContext::SetAllocator(const Allocator* allocator) {
......
@@ -48,12 +48,12 @@ class PADDLE_API DeviceContext {
   /**
    * @brief Move construct.
    */
-  DeviceContext(DeviceContext&&);
+  DeviceContext(DeviceContext&&) noexcept;
 
   /**
    * @brief Move assign operator.
    */
-  DeviceContext& operator=(DeviceContext&&);
+  DeviceContext& operator=(DeviceContext&&) noexcept;
 
   /**
    * @brief Default destruct.
......
@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
   this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
 }
 
-SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {  // NOLINT
+SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) noexcept {
   this->non_zero_elements_ = other.non_zero_elements_;
   this->non_zero_indices_ = other.non_zero_indices_;
   this->coalesced_ = other.coalesced_;
......
@@ -55,7 +55,7 @@ class SparseCooTensor : public TensorBase,
   SparseCooTensor(const SparseCooTensor& other);
 
   /// \brief move constructor
-  SparseCooTensor(SparseCooTensor&& other);
+  SparseCooTensor(SparseCooTensor&& other) noexcept;
 
   /// \brief SparseCooTensor shallow copy assignment.
   SparseCooTensor& operator=(const SparseCooTensor& other);
@@ -49,7 +49,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
   return *this;
 }
 
-StringTensor& StringTensor::operator=(StringTensor&& other) {  // NOLINT
+StringTensor& StringTensor::operator=(StringTensor&& other) noexcept {
   meta_ = std::move(other.meta_);
   std::swap(holder_, other.holder_);
   return *this;
@@ -56,7 +56,7 @@ class StringTensor : public TensorBase,
   /// \brief StringTensor shallow copy assignment.
   StringTensor& operator=(const StringTensor& other);
 
-  StringTensor& operator=(StringTensor&& other);
+  StringTensor& operator=(StringTensor&& other) noexcept;
 
   /// \brief Destroy the tensor object and release exclusive resources.
   virtual ~StringTensor() = default;
......
@@ -63,7 +63,7 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
   auto* label_ptr = label.data<T>();
 
   // get unique positive class center by ascending
-  std::set<T, std::less<T>> unique_label;
+  std::set<T, std::less<T>> unique_label;  // NOLINT
   for (int64_t i = 0; i < numel; ++i) {
     unique_label.insert(label_ptr[i]);
   }
......
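Here the explicitly typed comparator `std::less<T>` is kept under `// NOLINT` rather than rewritten. The transparent `std::less<>` would behave identically for ordering and additionally enable heterogeneous lookup, as this sketch (not Paddle code) shows:

```cpp
#include <functional>
#include <set>
#include <string>

int main() {
  // std::less<> is transparent: it exposes is_transparent, which unlocks
  // the template overloads of find/count/lower_bound.
  std::set<std::string, std::less<>> names = {"adam", "sgd"};
  // Heterogeneous lookup: no temporary std::string is built for the key.
  return names.find("sgd") != names.end() ? 0 : 1;
}
```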
@@ -63,7 +63,7 @@ void SampleWeightedNeighbors(
     bool return_eids) {
   std::priority_queue<phi::GraphWeightedNode<T>,
                       std::vector<phi::GraphWeightedNode<T>>,
-                      std::greater<phi::GraphWeightedNode<T>>>
+                      std::greater<phi::GraphWeightedNode<T>>>  // NOLINT
       min_heap;
   for (size_t i = 0; i < out_src.size(); i++) {
     float weight_key = log2(dice_distribution(rng)) * (1 / out_weight[i]);
......