Unverified commit 10a38b4e, authored by Galaxy1458 and committed by GitHub

remove some [-Wunused-parameter] warnings and fix a file to pass cpplint (#53814)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent e592534a
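
The fix itself is mechanical: every parameter that a stub, specialization, or override accepts but never reads is tagged with Paddle's UNUSED macro, silencing -Wunused-parameter without changing any signature. As a hedged sketch (the macro's exact definition and location in Paddle may differ), such a macro typically expands to the GCC/Clang unused attribute and to nothing on other compilers:

// Illustrative definition only; Paddle's real macro may differ in detail.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// A parameter tagged this way no longer triggers -Wunused-parameter:
int Stub(int used, int ignored UNUSED) { return used; }
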
......@@ -61,7 +61,9 @@ HOSTDEVICE bool NeedPrint(MT max_value, MT min_value, int check_nan_inf_level) {
template <typename T,
typename MT,
std::enable_if_t<!std::is_same<T, float>::value, bool> = true>
HOSTDEVICE bool NeedPrint(MT max_value, MT min_value, int check_nan_inf_level) {
HOSTDEVICE bool NeedPrint(MT max_value UNUSED,
MT min_value UNUSED,
int check_nan_inf_level) {
if (check_nan_inf_level >= 3) {
return true;
}
......
......@@ -95,7 +95,7 @@ class TemplateVariable {
strings_[identifier] = expression;
}
void Remove(std::string identifier, std::string expression) {
void Remove(std::string identifier, std::string expression UNUSED) {
for (auto it = strings_.begin(); it != strings_.end();) {
if (it->first == identifier) {
it = strings_.erase(it);
......
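
For reference, the Remove loop above uses the standard erase-while-iterating idiom for associative containers: erase() returns the iterator to the next element, so the loop never advances through an invalidated iterator. A minimal self-contained sketch (illustrative names, not Paddle code):

#include <map>
#include <string>

void RemoveKey(std::map<std::string, std::string>* m, const std::string& key) {
  for (auto it = m->begin(); it != m->end();) {
    if (it->first == key) {
      it = m->erase(it);  // erase() hands back the next valid iterator
    } else {
      ++it;
    }
  }
}
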
......@@ -73,7 +73,8 @@ void EraseLoadProcessPIDs(int64_t key) {
} while (0)
#define REGISTER_SIGNAL_HANDLER(SIGNAL, HANDLER_NAME, ERROR_MSG) \
static void HANDLER_NAME(int sig, siginfo_t *info, void *ctx) { \
static void HANDLER_NAME( \
int sig UNUSED, siginfo_t *info UNUSED, void *ctx UNUSED) { \
auto _w = \
write(STDERR_FILENO, ERROR_MSG, sizeof(ERROR_MSG) / sizeof(char)); \
(void)_w; \
......
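
For context, a three-argument handler like the one REGISTER_SIGNAL_HANDLER generates is installed via POSIX sigaction with the SA_SIGINFO flag. The sketch below shows that standard usage, not Paddle's actual registration code; the (void)w line mirrors the (void)_w idiom above for discarding write()'s result:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void Handler(int sig, siginfo_t *info, void *ctx) {
  // sig/info/ctx are intentionally unused, as in the macro above.
  static const char kMsg[] = "fatal signal caught\n";
  // write() is async-signal-safe; the return value is deliberately ignored.
  ssize_t w = write(STDERR_FILENO, kMsg, sizeof(kMsg) - 1);
  (void)w;
}

void InstallHandler() {
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = Handler;  // three-argument handler form
  sa.sa_flags = SA_SIGINFO;   // deliver siginfo_t* and context to the handler
  sigaction(SIGSEGV, &sa, nullptr);
}
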
......@@ -237,14 +237,14 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
}
}
void ShareAllLoD(const std::string& in,
const std::string& out) const override {
void ShareAllLoD(const std::string& in UNUSED,
const std::string& out UNUSED) const override {
// do nothing
}
void ShareLoD(const std::string& in,
const std::string& out,
size_t i = 0,
size_t j = 0) const override {
void ShareLoD(const std::string& in UNUSED,
const std::string& out UNUSED,
size_t i UNUSED = 0,
size_t j UNUSED = 0) const override {
// do nothing
}
......@@ -415,14 +415,15 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
}
}
int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
int32_t GetLoDLevel(const std::string& in UNUSED,
size_t i UNUSED = 0) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"GetLoDLevel function not support in dygraph mode"));
}
void SetLoDLevel(const std::string& out,
int32_t lod_level,
size_t j = 0) const override {
void SetLoDLevel(const std::string& out UNUSED,
int32_t lod_level UNUSED,
size_t j UNUSED = 0) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"SetLoDLevel function not support in dygraph mode"));
}
......@@ -452,7 +453,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
}
}
std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
std::vector<DDim> GetRepeatedDims(
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"GetRepeatedDims not support in dygraph runtime"));
}
......@@ -486,8 +488,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
}
}
void SetRepeatedDims(const std::string& name,
const std::vector<DDim>& dims) override {
void SetRepeatedDims(const std::string& name UNUSED,
const std::vector<DDim>& dims UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"SetRepeatedDims not support in dygraph runtime"));
}
......
......@@ -145,8 +145,8 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext {
}
void SetOutputDataType(const std::string& name,
framework::proto::VarType::Type type,
int index = 0) override {
framework::proto::VarType::Type type UNUSED,
int index UNUSED = 0) override {
VLOG(10) << "Set data type in infer var type of Eager mode is meaning less "
"for var: "
<< name;
......@@ -155,77 +155,79 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext {
bool IsDygraph() const override { return true; }
protected:
bool HasVar(const std::string& name) const override {
bool HasVar(const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"HasVar is not supported in runtime InferVarType"));
}
const std::vector<std::string>& InputVars(
const std::string& name) const override {
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"InputVars is not supported in runtime InferVarType"));
}
const std::vector<std::string>& OutputVars(
const std::string& name) const override {
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"OutputVars is not supported in runtime InferVarType"));
}
framework::proto::VarType::Type GetVarType(
const std::string& name) const override {
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not manipulate var in runtime InferVarType"));
}
void SetVarType(const std::string& name,
framework::proto::VarType::Type type) override {
void SetVarType(const std::string& name UNUSED,
framework::proto::VarType::Type type UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not manipulate var in runtime InferVarType"));
}
framework::proto::VarType::Type GetVarDataType(
const std::string& name) const override {
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not manipulate var in runtime InferVarType"));
}
void SetVarDataType(const std::string& name,
framework::proto::VarType::Type type) override {
void SetVarDataType(const std::string& name UNUSED,
framework::proto::VarType::Type type UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not manipulate var in runtime InferVarType"));
}
std::vector<framework::proto::VarType::Type> GetVarDataTypes(
const std::string& name) const override {
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"GetVarDataTypes is not supported in runtime InferVarType"));
}
void SetVarDataTypes(const std::string& name,
void SetVarDataTypes(const std::string& name UNUSED,
const std::vector<framework::proto::VarType::Type>&
multiple_data_type) override {
multiple_data_type UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"SetVarDataTypes is not supported in runtime InferVarType"));
}
std::vector<int64_t> GetVarShape(const std::string& name) const override {
std::vector<int64_t> GetVarShape(
const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not handle Shape in runtime InferVarType"));
}
void SetVarShape(const std::string& name,
const std::vector<int64_t>& dims) override {
void SetVarShape(const std::string& name UNUSED,
const std::vector<int64_t>& dims UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not handle Shape in runtime InferVarType"));
}
int32_t GetVarLoDLevel(const std::string& name) const override {
int32_t GetVarLoDLevel(const std::string& name UNUSED) const override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not handle LoDLevel in runtime InferVarType"));
}
void SetVarLoDLevel(const std::string& name, int32_t lod_level) override {
void SetVarLoDLevel(const std::string& name UNUSED,
int32_t lod_level UNUSED) override {
PADDLE_THROW(platform::errors::PermissionDenied(
"Do not handle LoDLevel in runtime InferVarType"));
}
......
......@@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class CConcatOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support c_concat for cpu kernel now."));
}
......
......@@ -32,7 +32,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class CSoftmaxWithCrossEntropyOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support c_embedding for cpu kernel now."));
}
......
......@@ -97,7 +97,7 @@ void FeedSparseCooTensorKernel(const Context& dev_ctx,
}
template <typename Context>
void FeedStringsKernel(const Context& dev_ctx,
void FeedStringsKernel(const Context& dev_ctx UNUSED,
const phi::ExtendedTensor& x,
int col,
phi::ExtendedTensor* out) {
......
......@@ -159,7 +159,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
}
phi::KernelKey GetKernelTypeForVar(
const std::string &var_name,
const std::string &var_name UNUSED,
const phi::DenseTensor &tensor,
const phi::KernelKey &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.dtype())) {
......@@ -305,7 +305,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
}
phi::KernelKey GetKernelTypeForVar(
const std::string &var_name,
const std::string &var_name UNUSED,
const phi::DenseTensor &tensor,
const phi::KernelKey &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.dtype())) {
......@@ -346,7 +346,7 @@ class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
}
phi::KernelKey GetKernelTypeForVar(
const std::string &var_name,
const std::string &var_name UNUSED,
const phi::DenseTensor &tensor,
const phi::KernelKey &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.dtype())) {
......@@ -394,7 +394,7 @@ class ElementwiseOpDoubleGradWithoutDXDY
}
phi::KernelKey GetKernelTypeForVar(
const std::string &var_name,
const std::string &var_name UNUSED,
const phi::DenseTensor &tensor,
const phi::KernelKey &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.dtype())) {
......@@ -442,7 +442,7 @@ class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
}
phi::KernelKey GetKernelTypeForVar(
const std::string &var_name,
const std::string &var_name UNUSED,
const phi::DenseTensor &tensor,
const phi::KernelKey &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.dtype())) {
......
......@@ -598,7 +598,7 @@ class MultiGRUHandler {
}
template <typename Tout>
void reorderOutput(std::shared_ptr<dnnl::memory> mem, int layer) {
void reorderOutput(std::shared_ptr<dnnl::memory> mem, int layer UNUSED) {
auto* data = mem->get_data_handle();
auto* hidden_data =
phi::funcs::to_void_cast(hidden_->mutable_data<Tout>(place_));
......
......@@ -57,7 +57,8 @@ DeviceType Place2DeviceType(const platform::Place& place) {
template <typename DevCtx>
typename std::enable_if<!std::is_same<DevCtx, phi::GPUContext>::value,
DevCtx*>::type
ConstructDevCtx(const phi::Place& p, /*unused*/ int stream_priority = 0) {
ConstructDevCtx(const phi::Place& p,
/*unused*/ int stream_priority UNUSED = 0) {
return new DevCtx(p);
}
......
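
The ConstructDevCtx shown above is one half of a classic enable_if overload pair: this non-GPU overload ignores stream_priority, while a sibling overload (presumably selected when DevCtx is phi::GPUContext) forwards it. A hedged, self-contained sketch of the same dispatch with stand-in types:

#include <type_traits>

struct CPUCtx { explicit CPUCtx(int /*place*/) {} };
struct GPUCtx { GPUCtx(int /*place*/, int /*priority*/) {} };

// Chosen for every context type except GPUCtx; the priority is ignored.
template <typename Ctx>
typename std::enable_if<!std::is_same<Ctx, GPUCtx>::value, Ctx*>::type
MakeCtx(int place, int /*stream_priority*/ = 0) {
  return new Ctx(place);
}

// Chosen only for GPUCtx; the priority is forwarded to the constructor.
template <typename Ctx>
typename std::enable_if<std::is_same<Ctx, GPUCtx>::value, Ctx*>::type
MakeCtx(int place, int stream_priority = 0) {
  return new Ctx(place, stream_priority);
}
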
......@@ -21,19 +21,19 @@
namespace phi {
template <typename T, typename Context>
void MarginCrossEntropyKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& labels,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* softmax,
DenseTensor* loss) {
void MarginCrossEntropyKernel(const Context& dev_ctx UNUSED,
const DenseTensor& logits UNUSED,
const DenseTensor& labels UNUSED,
bool return_softmax UNUSED,
int ring_id UNUSED,
int rank UNUSED,
int nranks UNUSED,
float margin1 UNUSED,
float margin2 UNUSED,
float margin3 UNUSED,
float scale UNUSED,
DenseTensor* softmax UNUSED,
DenseTensor* loss UNUSED) {
PADDLE_THROW(
errors::Unavailable("Do not support margin_cross_entropy for cpu kernel "
"now."));
......
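
For context, an assumption drawn from the parameter names (the CPU kernel above is only a stub that throws): margin1/margin2/margin3 and scale follow the combined-margin convention of face-recognition softmax losses, where the target-class logit is remapped before the usual softmax cross entropy:

$$\mathrm{logit}_y = s\,\bigl(\cos(m_1 \theta_y + m_2) - m_3\bigr)$$

with $\theta_y$ the angle between the feature and the class-$y$ weight row; $m_1{=}1,\ m_2{=}0.5,\ m_3{=}0$ recovers ArcFace.
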
......@@ -79,7 +79,7 @@ void CreateMaskMatrix(const CPUContext& dev_ctx,
template <typename TensorType>
void ResetParameterVector(const std::vector<TensorType>& raw_params_vec,
int num_layers,
int gate_num,
int gate_num UNUSED,
bool is_bidirec,
std::vector<std::vector<DenseTensor>>* params_vec) {
// the parameter raw sequence is [FWhi, FWhh, BWhi, BWhh] * num_layers
......
......@@ -117,10 +117,10 @@ void SGDDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const DenseTensor& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
const paddle::optional<DenseTensor>& master_param UNUSED,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* master_param_out) {
DenseTensor* master_param_out UNUSED) {
dev_ctx.template Alloc<T>(param_out);
sgd_dense_param_dense_grad_impl<T>(param, learning_rate, grad, param_out);
}
......@@ -131,24 +131,24 @@ void SGDDenseParamSparseGradKernel(
const DenseTensor& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
const paddle::optional<DenseTensor>& master_param UNUSED,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* master_param_out) {
DenseTensor* master_param_out UNUSED) {
dev_ctx.template Alloc<T>(param_out);
sgd_dense_param_sparse_grad_impl<T>(param, learning_rate, grad, param_out);
}
template <typename T, typename Context>
void SGDSparseParamSparseGradKernel(
const Context& dev_ctx,
const Context& dev_ctx UNUSED,
const SelectedRows& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<SelectedRows>& master_param,
bool multi_precision,
const paddle::optional<SelectedRows>& master_param UNUSED,
bool multi_precision UNUSED,
SelectedRows* param_out,
SelectedRows* master_param_out) {
SelectedRows* master_param_out UNUSED) {
// for distributed training, a sparse var may be empty,
// just skip updating.
if (grad.rows().size() == 0) {
......
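
For reference, the dense path above is plain SGD; master_param and multi_precision only participate in the mixed-precision variants (which keep an FP32 master copy of the weights), which is why they are UNUSED in these FP32-only CPU kernels. The update performed by sgd_dense_param_dense_grad_impl is

$$\theta_{t+1} = \theta_t - \eta_t \, g_t$$

where $\eta_t$ is the scalar held by the learning_rate tensor and $g_t$ the (dense or row-sparse) gradient.
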
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void UniformKernel(const Context &dev_ctx,
const IntArray &shape,
DataType dtype,
DataType dtype UNUSED,
const Scalar &min,
const Scalar &max,
int seed,
......
......@@ -104,7 +104,7 @@ struct SinGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * x.unaryExpr(Cosine<T>());
}
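
The functor's Out argument is unused because the derivative of sin depends only on x, hence the UNUSED tag. A hedged, self-contained sketch of the calling contract (the framework flattens each tensor into an Eigen map and invokes operator(); names below are illustrative, and a lambda stands in for Paddle's Cosine<T> functor):

#include <cmath>
#include <vector>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  using Vec = Eigen::TensorMap<Eigen::Tensor<float, 1>>;
  std::vector<float> x_d{0.f, 1.f}, dout_d{1.f, 1.f}, dx_d(2, 0.f);
  Vec x(x_d.data(), 2), dout(dout_d.data(), 2), dx(dx_d.data(), 2);
  Eigen::DefaultDevice dev;
  // SinGrad: dx = dout * cos(x); the result lands in dx_d through the map.
  dx.device(dev) = dout * x.unaryExpr([](float v) { return std::cos(v); });
  return 0;
}
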
......@@ -277,7 +277,7 @@ struct ReciprocalGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(-1) * out * out;
}
......@@ -310,7 +310,7 @@ struct CosGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = -dout * x.unaryExpr(Sine<T>());
}
......@@ -505,7 +505,7 @@ struct MishGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto sp = (x > static_cast<T>(threshold))
.select(x, (static_cast<T>(1) + x.exp()).log());
auto gsp = static_cast<T>(1) - (-sp).exp();
......@@ -544,7 +544,7 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto a = static_cast<T>(scale_a);
auto b = static_cast<T>(scale_b);
auto temp = (a * x).tanh() * (a * x).tanh();
......@@ -574,7 +574,7 @@ struct TanGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout / x.unaryExpr(Cosine<T>()).square();
}
......@@ -620,7 +620,7 @@ struct SqrtGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = static_cast<T>(0.5) * dout / out;
}
......@@ -645,7 +645,7 @@ struct RsqrtGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = static_cast<T>(-0.5) * dout * out * out * out;
}
......@@ -697,7 +697,7 @@ struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto x_beta = static_cast<T>(beta) * x;
dx.device(d) =
(x_beta > static_cast<T>(threshold))
......@@ -816,7 +816,7 @@ struct SinhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * x.unaryExpr(Cosh<T>());
}
......@@ -831,7 +831,7 @@ struct CoshGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * x.unaryExpr(Sinh<T>());
}
......@@ -867,7 +867,7 @@ struct AcosGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
-dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt();
}
......@@ -904,7 +904,7 @@ struct AsinGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt();
}
......@@ -941,7 +941,7 @@ struct AtanGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(1) / (static_cast<T>(1) + x.square());
}
......@@ -989,7 +989,7 @@ struct AcoshGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
dout * static_cast<T>(1) / (x * x - static_cast<T>(1)).sqrt();
}
......@@ -1026,7 +1026,7 @@ struct AsinhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
dout * static_cast<T>(1) / (x.square() + static_cast<T>(1)).sqrt();
}
......@@ -1063,7 +1063,7 @@ struct AtanhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(1) / (static_cast<T>(1) - x.square());
}
......@@ -1160,12 +1160,12 @@ template <typename T>
struct ReluGradGradFunctor : public BaseActivationFunctor<T> {
template <typename Device>
void operator()(const Device& dev,
const DenseTensor* X,
const DenseTensor* X UNUSED,
const DenseTensor* Out,
const DenseTensor* ddX,
DenseTensor* ddOut,
DenseTensor* dOut,
DenseTensor* dX) const {
DenseTensor* dOut UNUSED,
DenseTensor* dX UNUSED) const {
auto* d = dev.eigen_device();
auto ddx = EigenVector<T>::Flatten(
GET_DATA_SAFELY(ddX, "Input", "DDX", "ReluGradGrad"));
......@@ -1375,7 +1375,7 @@ struct HardTanhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
dout * ((x > static_cast<T>(t_min)) * (x < static_cast<T>(t_max)))
.template cast<T>();
......@@ -1431,11 +1431,11 @@ struct LeakyReluGradGradFunctor : public BaseActivationFunctor<T> {
template <typename Device>
void operator()(const Device& dev,
const DenseTensor* X,
const DenseTensor* Out,
const DenseTensor* Out UNUSED,
const DenseTensor* ddX,
DenseTensor* ddOut,
DenseTensor* dOut,
DenseTensor* dX) const {
DenseTensor* dOut UNUSED,
DenseTensor* dX UNUSED) const {
if (ddOut) {
auto* d = dev.eigen_device();
auto ddx = EigenVector<T>::Flatten(
......@@ -1479,7 +1479,7 @@ struct ThresholdedReluGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto th = static_cast<T>(threshold);
dx.device(d) = dout * (x > th).template cast<T>();
}
......@@ -1511,7 +1511,7 @@ struct Relu6GradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
float threshold = 6;
dx.device(d) =
dout * ((out > static_cast<T>(0)) * (out < static_cast<T>(threshold)))
......@@ -1540,7 +1540,7 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * (x.tanh() * x.tanh());
}
......@@ -1577,7 +1577,7 @@ struct HardShrinkGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto temp1 = x < static_cast<T>(threshold * -1.f);
auto temp2 = x > static_cast<T>(threshold);
dx.device(d) = dout * (temp1 || temp2).template cast<T>();
......@@ -1615,7 +1615,7 @@ struct SoftShrinkGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto lambdaT = static_cast<T>(lambda);
auto temp1 = (x > lambdaT).template cast<T>();
auto temp2 = (x < -lambdaT).template cast<T>();
......@@ -1673,7 +1673,7 @@ struct ELUGradNegativeAlphaFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
// case 2: alpha < 0
// dx = dout, if x > 0
// dx = dout * (out + alpha), if x <=0
......@@ -1742,7 +1742,7 @@ struct SiluGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto temp1 = static_cast<T>(1) + (-x).exp(); // 1+e^(-x)
auto temp2 = x * (-x).exp(); // x*e^(-x)
dx.device(d) = dout * ((static_cast<T>(1) / temp1) *
......@@ -1770,7 +1770,7 @@ struct SoftsignGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) =
dout * (static_cast<T>(1) / (static_cast<T>(1) + x.abs()).square());
}
......@@ -1947,7 +1947,7 @@ struct LogSigmoidGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto temp = (-x).cwiseMax(static_cast<T>(0)); // temp = max(-x, 0)
dx.device(d) =
dout * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp()));
......@@ -1984,7 +1984,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = dout *
((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
.template cast<T>() *
......@@ -2012,7 +2012,7 @@ struct LogGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * (static_cast<T>(1) / x);
}
......@@ -2036,7 +2036,7 @@ struct Log2GradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(1) / (x * static_cast<T>(log(2)));
}
......@@ -2060,7 +2060,7 @@ struct Log10GradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(1) / (x * static_cast<T>(log(10)));
}
......@@ -2083,7 +2083,7 @@ struct Log1pGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * (static_cast<T>(1) / (x + static_cast<T>(1)));
}
......@@ -2157,7 +2157,7 @@ struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto tmp = ((x + static_cast<T>(offset)) < static_cast<T>(threshold))
.template cast<T>();
dx.device(d) =
......@@ -2193,7 +2193,7 @@ struct SwishGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out fake_out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out fake_out UNUSED, dOut dout, dX dx) const {
float beta = 1.0;
auto temp1 = static_cast<T>(1) /
(static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
......@@ -2229,7 +2229,7 @@ struct PowGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(factor) *
x.pow(static_cast<T>(factor) - static_cast<T>(1));
}
......@@ -2279,7 +2279,8 @@ struct ZeroGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(
Device d, X x UNUSED, Out out, dOut dout UNUSED, dX dx) const {
dx.device(d) = static_cast<T>(0) * out;
}
......@@ -2384,7 +2385,7 @@ struct CELUGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
auto temp_a_pos = static_cast<T>(alpha > 0);
auto temp_a_neg = static_cast<T>(alpha <= 0);
auto temp_x_pos = (x > static_cast<T>(0)).template cast<T>();
......
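
As a worked check of the CELU gradient above (assuming the standard definition $\mathrm{CELU}(x) = \max(0, x) + \min(0, \alpha(e^{x/\alpha} - 1))$), the derivative for $\alpha > 0$ is

$$\frac{\partial\,\mathrm{CELU}(x)}{\partial x} = \begin{cases} 1, & x > 0 \\ e^{x/\alpha}, & x \le 0 \end{cases}$$

which is what the temp_x_pos mask (and its negative-branch counterpart) selects; out never appears in the expression, hence the UNUSED tag on it.
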
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef POLY_UTIL_CC_
#define POLY_UTIL_CC_
#include "paddle/phi/kernels/funcs/detection/poly_util.h"
namespace phi {
namespace funcs {
using phi::funcs::gpc_free_polygon;
using phi::funcs::gpc_polygon_clip;
template <class T>
void Array2PointVec(const T* box,
const size_t box_size,
std::vector<Point_<T>>* vec) {
size_t pts_num = box_size / 2;
(*vec).resize(pts_num);
for (size_t i = 0; i < pts_num; i++) {
(*vec).at(i).x = box[2 * i];
(*vec).at(i).y = box[2 * i + 1];
}
}
template <class T>
void Array2Poly(const T* box,
const size_t box_size,
phi::funcs::gpc_polygon* poly) {
size_t pts_num = box_size / 2;
(*poly).num_contours = 1;
(*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
(*poly).hole[0] = 0;
(*poly).contour =
(phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
(*poly).contour->num_vertices = pts_num;
(*poly).contour->vertex =
(phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
for (size_t i = 0; i < pts_num; ++i) {
(*poly).contour->vertex[i].x = box[2 * i];
(*poly).contour->vertex[i].y = box[2 * i + 1];
}
}
template <class T>
void PointVec2Poly(const std::vector<Point_<T>>& vec,
phi::funcs::gpc_polygon* poly) {
int pts_num = vec.size();
(*poly).num_contours = 1;
(*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
(*poly).hole[0] = 0;
(*poly).contour =
(phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
(*poly).contour->num_vertices = pts_num;
(*poly).contour->vertex =
(phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
for (size_t i = 0; i < pts_num; ++i) {
(*poly).contour->vertex[i].x = vec[i].x;
(*poly).contour->vertex[i].y = vec[i].y;
}
}
template <class T>
void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour,
std::vector<Point_<T>>* vec) {
int pts_num = contour.num_vertices;
(*vec).resize(pts_num);
for (int i = 0; i < pts_num; i++) {
(*vec).at(i).x = contour.vertex[i].x;
(*vec).at(i).y = contour.vertex[i].y;
}
}
template <class T>
T GetContourArea(const std::vector<Point_<T>>& vec) {
size_t pts_num = vec.size();
if (pts_num < 3) return T(0.);
T area = T(0.);
for (size_t i = 0; i < pts_num; ++i) {
area += vec[i].x * vec[(i + 1) % pts_num].y -
vec[i].y * vec[(i + 1) % pts_num].x;
}
return std::fabs(area / 2.0);
}
template <class T>
T PolyArea(const T* box, const size_t box_size, const bool normalized) {
// If coordinate values are invalid
// if area size <= 0, return 0.
std::vector<Point_<T>> vec;
Array2PointVec<T>(box, box_size, &vec);
return GetContourArea<T>(vec);
}
template <class T>
T PolyOverlapArea(const T* box1,
const T* box2,
const size_t box_size,
const bool normalized) {
phi::funcs::gpc_polygon poly1;
phi::funcs::gpc_polygon poly2;
Array2Poly<T>(box1, box_size, &poly1);
Array2Poly<T>(box2, box_size, &poly2);
phi::funcs::gpc_polygon respoly;
phi::funcs::gpc_op op = phi::funcs::GPC_INT;
phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly);
T inter_area = T(0.);
int contour_num = respoly.num_contours;
for (int i = 0; i < contour_num; ++i) {
std::vector<Point_<T>> resvec;
Poly2PointVec<T>(respoly.contour[i], &resvec);
// inter_area += std::fabs(cv::contourArea(resvec)) + 0.5f *
// (cv::arcLength(resvec, true));
inter_area += GetContourArea<T>(resvec);
}
phi::funcs::gpc_free_polygon(&poly1);
phi::funcs::gpc_free_polygon(&poly2);
phi::funcs::gpc_free_polygon(&respoly);
return inter_area;
}
} // namespace funcs
} // namespace phi
#endif
......@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifndef POLY_UTIL_H_
#define POLY_UTIL_H_
#include <vector>
#include "paddle/phi/kernels/funcs/gpc.h"
......@@ -20,6 +23,9 @@ limitations under the License. */
namespace phi {
namespace funcs {
using phi::funcs::gpc_free_polygon;
using phi::funcs::gpc_polygon_clip;
template <class T>
class Point_ {
public:
......@@ -43,33 +49,114 @@ class Point_ {
template <class T>
void Array2PointVec(const T* box,
const size_t box_size,
std::vector<Point_<T>>* vec);
std::vector<Point_<T>>* vec) {
size_t pts_num = box_size / 2;
(*vec).resize(pts_num);
for (size_t i = 0; i < pts_num; i++) {
(*vec).at(i).x = box[2 * i];
(*vec).at(i).y = box[2 * i + 1];
}
}
template <class T>
void Array2Poly(const T* box,
const size_t box_size,
phi::funcs::gpc_polygon* poly);
phi::funcs::gpc_polygon* poly) {
size_t pts_num = box_size / 2;
(*poly).num_contours = 1;
(*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
(*poly).hole[0] = 0;
(*poly).contour =
(phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
(*poly).contour->num_vertices = pts_num;
(*poly).contour->vertex =
(phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
for (size_t i = 0; i < pts_num; ++i) {
(*poly).contour->vertex[i].x = box[2 * i];
(*poly).contour->vertex[i].y = box[2 * i + 1];
}
}
template <class T>
void PointVec2Poly(const std::vector<Point_<T>>& vec,
phi::funcs::gpc_polygon* poly);
phi::funcs::gpc_polygon* poly) {
int pts_num = vec.size();
(*poly).num_contours = 1;
(*poly).hole = reinterpret_cast<int*>(malloc(sizeof(int)));
(*poly).hole[0] = 0;
(*poly).contour =
(phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list));
(*poly).contour->num_vertices = pts_num;
(*poly).contour->vertex =
(phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num);
for (size_t i = 0; i < pts_num; ++i) {
(*poly).contour->vertex[i].x = vec[i].x;
(*poly).contour->vertex[i].y = vec[i].y;
}
}
template <class T>
void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour,
std::vector<Point_<T>>* vec);
std::vector<Point_<T>>* vec) {
int pts_num = contour.num_vertices;
(*vec).resize(pts_num);
for (int i = 0; i < pts_num; i++) {
(*vec).at(i).x = contour.vertex[i].x;
(*vec).at(i).y = contour.vertex[i].y;
}
}
template <class T>
T GetContourArea(const std::vector<Point_<T>>& vec);
T GetContourArea(const std::vector<Point_<T>>& vec) {
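// Shoelace formula: 2 * area = sum_i (x_i * y_{i+1} - y_i * x_{i+1}),
// with indices taken modulo pts_num; the fabs below makes the result
// independent of vertex orientation.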
size_t pts_num = vec.size();
if (pts_num < 3) return T(0.);
T area = T(0.);
for (size_t i = 0; i < pts_num; ++i) {
area += vec[i].x * vec[(i + 1) % pts_num].y -
vec[i].y * vec[(i + 1) % pts_num].x;
}
return std::fabs(area / 2.0);
}
template <class T>
T PolyArea(const T* box, const size_t box_size, const bool normalized);
T PolyArea(const T* box, const size_t box_size, const bool normalized UNUSED) {
// If coordinate values are invalid
// if area size <= 0, return 0.
std::vector<Point_<T>> vec;
Array2PointVec<T>(box, box_size, &vec);
return GetContourArea<T>(vec);
}
template <class T>
T PolyOverlapArea(const T* box1,
const T* box2,
const size_t box_size,
const bool normalized);
const bool normalized UNUSED) {
phi::funcs::gpc_polygon poly1;
phi::funcs::gpc_polygon poly2;
Array2Poly<T>(box1, box_size, &poly1);
Array2Poly<T>(box2, box_size, &poly2);
phi::funcs::gpc_polygon respoly;
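// GPC_INT requests the intersection of the two polygons; gpc_polygon_clip
// writes the clipped result into respoly, which is freed below along with
// the inputs.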
phi::funcs::gpc_op op = phi::funcs::GPC_INT;
phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly);
T inter_area = T(0.);
int contour_num = respoly.num_contours;
for (int i = 0; i < contour_num; ++i) {
std::vector<Point_<T>> resvec;
Poly2PointVec<T>(respoly.contour[i], &resvec);
// inter_area += std::fabs(cv::contourArea(resvec)) + 0.5f *
// (cv::arcLength(resvec, true));
inter_area += GetContourArea<T>(resvec);
}
phi::funcs::gpc_free_polygon(&poly1);
phi::funcs::gpc_free_polygon(&poly2);
phi::funcs::gpc_free_polygon(&respoly);
return inter_area;
}
} // namespace funcs
} // namespace phi
#include "paddle/phi/kernels/funcs/detection/poly_util.cc"
#endif
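
The header change above is the standard header-only pattern for templates: the old header ended with an #include of poly_util.cc (the removed line above), presumably the cpplint complaint the commit title cites, and since template definitions must be visible at the point of instantiation anyway, they now live in the header directly. A minimal sketch of the pattern:

#ifndef EXAMPLE_UTIL_H_
#define EXAMPLE_UTIL_H_

// The template body sits in the header so that every translation unit
// instantiating Twice<T> can see it; no .cc include is required.
template <class T>
T Twice(T v) {
  return v + v;
}

#endif  // EXAMPLE_UTIL_H_
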
......@@ -20,7 +20,7 @@ namespace phi {
template <typename T, typename Context>
void UniformRawKernel(const Context &dev_ctx,
const IntArray &shape,
DataType dtype,
DataType dtype UNUSED,
const Scalar &min,
const Scalar &max,
int seed,
......