Unverified commit 0e9cc551, authored by cyberslack_lee, committed by GitHub

[clang-tidy] NO.20 clang-analyzer-core.CallAndMessage (#56954)

Parent 25f78de0
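
For context, clang-analyzer-core.CallAndMessage is the Clang Static Analyzer check that reports calls made with uninitialized arguments, null function pointers, or an uninitialized receiver object. The hunks below first enable the check in .clang-tidy, then either fix the offending code (e.g. initializing a pointer before use) or silence reports judged to be false positives with a trailing // NOLINT. A minimal, hypothetical sketch of both patterns follows; the names and diagnostic text are illustrative, not taken from Paddle:

// Hypothetical illustration of the two remediation patterns used in this PR.
#include <cstdio>

static void Report(int v) { std::printf("%d\n", v); }

int main() {
  // Pattern 1: initialize before use. Without "= 0" the analyzer would report
  // something like "1st function call argument is an uninitialized value".
  int value = 0;
  Report(value);

  // Pattern 2: per-line suppression when the report is a known false positive.
  Report(value);  // NOLINT
  return 0;
}
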
@@ -50,7 +50,7 @@ bugprone-use-after-move,
 -clang-analyzer-apiModeling.google.GTest,
 -clang-analyzer-apiModeling.llvm.CastValue,
 -clang-analyzer-apiModeling.llvm.ReturnValue,
--clang-analyzer-core.CallAndMessage,
+clang-analyzer-core.CallAndMessage,
 -clang-analyzer-core.DivideZero,
 -clang-analyzer-core.DynamicTypePropagation,
 clang-analyzer-core.NonNullParamChecker,
...
@@ -126,7 +126,7 @@ void GradNodeBase::SetGradInMeta(const paddle::Tensor& fwd_out,
 } else if (phi::distributed::DistTensor::classof(fwd_out.impl().get())) {
 // TODO(chenweihang): DistTensor contains global and local meta, here
 // only set the local meta now, we should set global meta later
-dense_tensor =
+dense_tensor = // NOLINT
 &(static_cast<phi::distributed::DistTensor*>(fwd_out.impl().get())
 ->value());
 } else {
...
@@ -180,7 +180,7 @@ int DeleteCastOpPass::ApplyCastWriteReadPass(ir::Graph* graph) const {
 std::string cast_out_name = write_to_array_0_x_name + "_fp16";
 VarDesc cast_out_desc(cast_out_name);
-cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape());
+cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape()); // NOLINT
 cast_out_desc.SetDataType(proto::VarType::Type::VarType_Type_FP16);
 auto* cast_out = graph0->CreateVarNode(&cast_out_desc);
...
@@ -121,7 +121,7 @@ static int BuildFusion(Graph* graph,
 if (with_fc_bias) {
 // Add FC-bias with LSTM-bias (into GEMM result to be)
-auto* fc_bias_var = scope->FindVar(fc_bias->Name());
+auto* fc_bias_var = scope->FindVar(fc_bias->Name()); // NOLINT
 const auto& fc_bias_tensor = fc_bias_var->Get<phi::DenseTensor>();
 for (int i = 0; i < fc_bias_tensor.numel(); i++) {
 combined_biases[i] += fc_bias_tensor.data<float>()[i];
...
@@ -102,7 +102,8 @@ void InsertOpToGraph(const std::vector<std::vector<Node *>> &inout_node_vectors,
 i++;
 }
-fuse_adamw_op_desc.SetInput("LearningRate", {config.first_lr->Name()});
+fuse_adamw_op_desc.SetInput("LearningRate",
+{config.first_lr->Name()}); // NOLINT
 if (config.use_skip_update) {
 fuse_adamw_op_desc.SetInput("SkipUpdate",
 {config.first_skip_update->Name()});
...
@@ -72,10 +72,12 @@ PDNode *PDPattern::RetrieveNode(const std::string &id) const {
 }
 void PDPattern::AddEdge(PDNode *a, PDNode *b) {
-PADDLE_ENFORCE_NOT_NULL(
-a, platform::errors::NotFound("PDNode %s is not found.", a->name()));
-PADDLE_ENFORCE_NOT_NULL(
-b, platform::errors::NotFound("PDNode %s is not found.", b->name()));
+PADDLE_ENFORCE_NOT_NULL(a,
+platform::errors::NotFound("PDNode %s is not found.",
+a->name())); // NOLINT
+PADDLE_ENFORCE_NOT_NULL(b,
+platform::errors::NotFound("PDNode %s is not found.",
+b->name())); // NOLINT
 PADDLE_ENFORCE_NE(a,
 b,
 platform::errors::PermissionDenied(
...
@@ -49,7 +49,7 @@ void TransferLayoutElimPass::PutTranferlayoutAfterOp(
 std::unordered_set<const Node *> remove_nodes;
 // Ensure op_node has only one output!
 int op_node_useful_output = 0;
-Node *var2;
+Node *var2 = nullptr;
 for (auto ele : op_node->outputs) {
 if (!ele->outputs.empty()) {
 op_node_useful_output++;
...
@@ -909,7 +909,8 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
 }
 }
-VLOG(4) << "End run " << place << " " << op->DebugStringEx(local_scope);
+VLOG(4) << "End run " << place << " "
+<< op->DebugStringEx(local_scope); // NOLINT
 if (!instr_node.InplaceBackMap().empty()) {
 platform::RecordEvent inplaceback_event(
@@ -933,7 +934,7 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
 instr_node.DeviceContext().Wait();
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
-VLOG(4) << "Operator(" << op->Type()
+VLOG(4) << "Operator(" << op->Type() // NOLINT
 << "): context wait and get last error";
 #endif
 }
...
@@ -1200,7 +1200,7 @@ VarDesc *OpDesc::FindVarRecursive(const std::string &name) {
 PADDLE_THROW(platform::errors::NotFound(
 "Not found Var(%s) from Block(%d) back into global Block.",
 name,
-block_->ID()));
+block_->ID())); // NOLINT
 }
 CompileTimeInferShapeContext::CompileTimeInferShapeContext(
...
@@ -387,7 +387,7 @@ void VarBase::CopyFrom(const VarBase& src, const bool blocking) {
 src.Name()));
 place = Place();
 } else {
-dst_tensor->set_lod(src_tensor.lod());
+dst_tensor->set_lod(src_tensor.lod()); // NOLINT
 dst_tensor->Resize(src_tensor.dims());
 }
 framework::TensorCopy(src_tensor, place, dst_tensor);
...
@@ -513,13 +513,14 @@ class GRUCPUKernel : public framework::OpKernel<T> {
 gru_value.gate_value = gate_t.data<T>();
 gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
-gru_value,
-frame_size,
-cur_batch_size,
-active_node,
-active_gate,
-origin_mode);
+phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(
+dev_ctx, // NOLINT
+gru_value,
+frame_size,
+cur_batch_size,
+active_node,
+active_gate,
+origin_mode);
 gru_value.prev_out_value = gru_value.output_value;
 }
...
@@ -109,7 +109,7 @@ class GRUKernel : public framework::OpKernel<T> {
 gru_value.output_value = hidden_t.data<T>();
 gru_value.gate_value = gate_t.data<T>();
 gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
+phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx, // NOLINT
 gru_value,
 frame_size,
 cur_batch_size,
...
@@ -600,7 +600,7 @@ class MatMulGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
 phi::DenseTensor *out) const {
 // gradient is calculated in a different way when broadcasting is used
 bool need_combine = (x->dims().size() == 3 || y->dims().size() == 3) &&
-out->dims().size() == 2;
+out->dims().size() == 2; // NOLINT
 phi::DenseTensor x_combined, y_combined;
 if (need_combine) {
...
@@ -81,7 +81,7 @@ class SequenceSoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
 auto& lod = x->lod();
 const size_t level = lod.size() - 1;
-x_grad->mutable_data<T>(ctx.GetPlace());
+x_grad->mutable_data<T>(ctx.GetPlace()); // NOLINT
 for (int i = 0; i < static_cast<int>(lod[level].size()) - 1; ++i) {
 int start_pos = static_cast<int>(lod[level][i]);
 int end_pos = static_cast<int>(lod[level][i + 1]);
...
@@ -295,7 +295,7 @@ void RnnFunc(const Context& dev_ctx,
 num_layers,
 init_h_dims[0]));
 if (is_lstm(cell_type)) {
-const auto& init_c_dims = init_c->dims();
+const auto& init_c_dims = init_c->dims(); // NOLINT
 PADDLE_ENFORCE_EQ(init_c_dims[0],
 num_layers * direction_num,
 phi::errors::InvalidArgument(
...
@@ -32,7 +32,7 @@ template <typename T>
 void BackupTensor(const CPUContext& dev_ctx,
 DenseTensor* dst,
 DenseTensor* src) {
-dst->Resize(src->dims());
+dst->Resize(src->dims()); // NOLINT
 dev_ctx.Alloc<T>(dst);
 Copy(dev_ctx, *src, dev_ctx.GetPlace(), false, dst);
 }
@@ -250,7 +250,7 @@ struct GRUGradCell : GradCell<T> {
 gru_value.gate_weight = weight_hh->data<T>();
 gru_grad.gate_grad = grad_gate->data<T>();
-gru_grad.reset_output_grad = grad_state->data<T>();
+gru_grad.reset_output_grad = grad_state->data<T>(); // NOLINT
 gru_grad.prev_out_grad = grad_pre_hidden->data<T>();
 gru_grad.output_grad = grad_hidden->data<T>();
 gru_grad.gate_weight_grad = grad_weight_hh->data<T>();
@@ -314,9 +314,9 @@ struct LSTMGradCell : GradCell<T> {
 lstm_value.gate_value = gate_tensor->data<T>();
 lstm_value.state_value = state_tensor->data<T>();
 lstm_value.state_active_value = act_state_tensor->data<T>();
-lstm_value.prev_state_value = pre_state->data<T>();
-lstm_grad.state_grad = grad_state->data<T>();
+lstm_value.prev_state_value = pre_state->data<T>(); // NOLINT
+lstm_grad.state_grad = grad_state->data<T>(); // NOLINT
 lstm_grad.gate_grad = grad_gate->data<T>();
 lstm_grad.output_grad = grad_hidden->data<T>();
 lstm_grad.prev_state_grad = grad_pre_state->data<T>();
...
@@ -55,7 +55,8 @@ void GraphSendRecvCpuGradLoop(const int& index_size,
 const IndexT& forward_src_idx = d_index[i];
 const IndexT& forward_dst_idx = s_index[i];
 auto input_slice = input.Slice(forward_src_idx, forward_src_idx + 1);
-auto output_slice = output->Slice(forward_dst_idx, forward_dst_idx + 1);
+auto output_slice =
+output->Slice(forward_dst_idx, forward_dst_idx + 1); // NOLINT
 auto eigen_input = phi::EigenVector<T>::Flatten(input_slice);
 auto eigen_output = phi::EigenVector<T>::Flatten(output_slice);
...
@@ -528,7 +528,7 @@ struct MishFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-auto sp = (x > static_cast<T>(threshold))
+auto sp = (x > static_cast<T>(threshold)) // NOLINT
 .select(x, (static_cast<T>(1) + x.exp()).log());
 out.device(d) = x * sp.tanh();
 }
@@ -551,7 +551,7 @@ struct MishGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto sp = (x > static_cast<T>(threshold))
+auto sp = (x > static_cast<T>(threshold)) // NOLINT
 .select(x, (static_cast<T>(1) + x.exp()).log());
 auto gsp = static_cast<T>(1) - (-sp).exp();
 auto tsp = sp.tanh();
@@ -571,8 +571,8 @@ struct STanhFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) =
-static_cast<T>(scale_b) * (static_cast<T>(scale_a) * x).tanh();
+out.device(d) = static_cast<T>(scale_b) *
+(static_cast<T>(scale_a) * x).tanh(); // NOLINT
 }
 };
@@ -738,7 +738,7 @@ struct SoftplusFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x; // NOLINT
 out.device(d) = (x_beta > static_cast<T>(threshold))
 .select(x,
 (static_cast<T>(1) + x_beta.exp()).log() /
@@ -764,7 +764,7 @@ struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x; // NOLINT
 dx.device(d) =
 (x_beta > static_cast<T>(threshold))
 .select(dout, dout / (static_cast<T>(1) + (-x_beta).exp()));
@@ -790,7 +790,7 @@ struct SoftplusDoubleGradFunctor : public BaseActivationFunctor<T> {
 auto* d = dev.eigen_device();
 auto x = EigenVector<T>::Flatten(
 GET_DATA_SAFELY(X, "Input", "X", "SoftplusDoubleGrad"));
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x; // NOLINT
 auto ddx = EigenVector<T>::Flatten(
 GET_DATA_SAFELY(ddX, "Input", "DDX", "SoftplusDoubleGrad"));
@@ -1453,8 +1453,8 @@ struct HardTanhFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) =
-x.cwiseMax(static_cast<T>(t_min)).cwiseMin(static_cast<T>(t_max));
+out.device(d) = x.cwiseMax(static_cast<T>(t_min))
+.cwiseMin(static_cast<T>(t_max)); // NOLINT
 }
 };
@@ -1471,9 +1471,9 @@ struct HardTanhGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-dx.device(d) =
-dout * ((x > static_cast<T>(t_min)) * (x < static_cast<T>(t_max)))
+dx.device(d) = dout * ((x > static_cast<T>(t_min)) *
+(x < static_cast<T>(t_max))) // NOLINT
 .template cast<T>();
 }
 static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
@@ -1508,8 +1508,8 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto temp1 =
-static_cast<T>(alpha) * (x < static_cast<T>(0)).template cast<T>();
+auto temp1 = static_cast<T>(alpha) *
+(x < static_cast<T>(0)).template cast<T>(); // NOLINT
 auto temp2 = (x >= static_cast<T>(0)).template cast<T>();
 dx.device(d) = dout * (temp1 + temp2).template cast<T>();
 }
@@ -1593,8 +1593,8 @@ struct Relu6Functor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) =
-x.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(threshold));
+out.device(d) = x.cwiseMax(static_cast<T>(0))
+.cwiseMin(static_cast<T>(threshold)); // NOLINT
 }
 };
@@ -1731,7 +1731,8 @@ struct ELUFunctor : public BaseActivationFunctor<T> {
 void operator()(Device d, X x, Out out) const {
 out.device(d) =
 (x < static_cast<T>(0))
-.select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)), x);
+.select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)),
+x); // NOLINT
 }
 };
@@ -2099,7 +2100,7 @@ struct HardSigmoidFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);
+auto temp = x * static_cast<T>(slope) + static_cast<T>(offset); // NOLINT
 out.device(d) =
 temp.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(1));
 }
@@ -2118,7 +2119,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
-dx.device(d) = dout *
+dx.device(d) = dout * // NOLINT
 ((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
 .template cast<T>() *
 static_cast<T>(slope);
@@ -2353,7 +2354,7 @@ struct HardSwishFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) = (x + static_cast<T>(offset))
+out.device(d) = (x + static_cast<T>(offset)) // NOLINT
 .cwiseMax(static_cast<T>(0))
 .cwiseMin(static_cast<T>(threshold)) *
 x / static_cast<T>(scale);
@@ -2375,8 +2376,9 @@ struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
 typename dOut,
 typename dX>
 void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto tmp = ((x + static_cast<T>(offset)) < static_cast<T>(threshold))
-.template cast<T>();
+auto tmp =
+((x + static_cast<T>(offset)) < static_cast<T>(threshold)) // NOLINT
+.template cast<T>();
 dx.device(d) =
 dout *
 (((x + static_cast<T>(offset)) > static_cast<T>(0)).template cast<T>() *
@@ -2397,7 +2399,8 @@ struct SwishFunctor : public BaseActivationFunctor<T> {
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) = x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
+out.device(d) =
+x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp()); // NOLINT
 }
 };
@@ -2431,7 +2434,7 @@ struct PowFunctor : public BaseActivationFunctor<T> {
 }
 template <typename Device, typename X, typename Out>
 void operator()(Device d, X x, Out out) const {
-out.device(d) = x.pow(static_cast<T>(factor));
+out.device(d) = x.pow(static_cast<T>(factor)); // NOLINT
 }
 };
@@ -2585,8 +2588,8 @@ struct CELUFunctor : public BaseActivationFunctor<T> {
 void operator()(Device d, X x, Out out) const {
 out.device(d) =
 (x < static_cast<T>(0))
-.select(static_cast<T>(alpha) *
-((x / static_cast<T>(alpha)).exp() - static_cast<T>(1)),
+.select(static_cast<T>(alpha) * ((x / static_cast<T>(alpha)).exp() -
+static_cast<T>(1)), // NOLINT
 x);
 }
 };
...
@@ -893,7 +893,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
 /* Copy the old contour and hole data into the extended arrays */
 for (c = 0; c < p->num_contours; c++) {
 extended_hole[c] = p->hole[c];
-extended_contour[c] = p->contour[c];
+extended_contour[c] = p->contour[c]; // NOLINT
 }
 /* Copy the new contour and hole onto the end of the extended arrays */
@@ -905,7 +905,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
 new_contour->num_vertices * static_cast<int>(sizeof(gpc_vertex)),
 const_cast<char *>("contour addition"));
 for (v = 0; v < new_contour->num_vertices; v++) {
-extended_contour[c].vertex[v] = new_contour->vertex[v];
+extended_contour[c].vertex[v] = new_contour->vertex[v]; // NOLINT
 }
 /* Dispose of the old contour */
...
@@ -110,7 +110,7 @@ class RNNDescriptors {
 dropout_state->Resize({static_cast<int64_t>(state_size)});
 dev_ctx.template Alloc<uint8_t>(dropout_state);
 }
-dropout_desc_.descriptor(handle,
+dropout_desc_.descriptor(handle, // NOLINT
 dev_ctx.GetPlace(),
 is_initialized,
 dropout_prob_,
...
@@ -40,7 +40,7 @@ void AllocCsrPtr(const Context& dev_ctx,
 DenseTensor dx_crows = phi::EmptyLike<IntT>(dev_ctx, x.crows());
 DenseTensor dx_cols = phi::EmptyLike<IntT>(dev_ctx, x.cols());
 DenseTensor dx_values = phi::EmptyLike<T>(dev_ctx, x.values());
-dx->set_meta(x.meta());
+dx->set_meta(x.meta()); // NOLINT
 dx->SetMember(dx_crows, dx_cols, dx_values, x.dims());
 }
@@ -50,7 +50,7 @@ void AllocCooPtr(const Context& dev_ctx,
 SparseCooTensor* dx) {
 DenseTensor dx_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
 DenseTensor dx_values = phi::EmptyLike<T>(dev_ctx, x.values());
-dx->set_meta(x.meta());
+dx->set_meta(x.meta()); // NOLINT
 dx->SetMember(dx_indices, dx_values, x.dims(), x.coalesced());
 }
...