Unverified · Commit c174aa22 authored by Galaxy1458, committed by GitHub

test,test=develop (#53839)

Parent 6cb53e91
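
This commit annotates kernel parameters that are kept for signature compatibility but never read in the function body with UNUSED, const-qualifies the `phi::DDim& input_dims` parameters of the renorm functors, and braces a single-statement `else`. The sketch below illustrates the annotation pattern only; it assumes UNUSED expands to the GCC/Clang `unused` attribute (the real macro ships in Paddle's own headers), and `ExampleGradKernel` is a hypothetical stand-in rather than an actual Paddle kernel.

```cpp
#include <cassert>

// Assumed definition: Paddle's UNUSED is expected to map to a compiler
// attribute like this on GCC/Clang and to a no-op elsewhere.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Hypothetical kernel-style function: the full argument list is kept so the
// signature stays unchanged for callers, but only `dout` is read. Annotating
// the untouched parameter silences -Wunused-parameter at the declaration
// site instead of at every call site.
template <typename T>
void ExampleGradKernel(const T& x UNUSED, const T& dout, T* dx) {
  *dx = dout;  // identity-like backward: the gradient passes straight through
}

int main() {
  float x = 1.0f, dout = 2.0f, dx = 0.0f;
  ExampleGradKernel(x, dout, &dx);
  assert(dx == 2.0f);
  return 0;
}
```

The parameters stay in the signatures, presumably because the kernels are dispatched through a common registration interface; only the warning is suppressed.
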
@@ -20,15 +20,15 @@ namespace phi {
 template <typename T, typename Context>
 void LUUnpackGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
-const DenseTensor& pivots,
-const DenseTensor& l,
-const DenseTensor& u,
-const DenseTensor& pmat,
+const DenseTensor& x UNUSED,
+const DenseTensor& pivots UNUSED,
+const DenseTensor& l UNUSED,
+const DenseTensor& u UNUSED,
+const DenseTensor& pmat UNUSED,
 const DenseTensor& l_grad,
 const DenseTensor& u_grad,
-bool unpack_ludata,
-bool unpack_pivots,
+bool unpack_ludata UNUSED,
+bool unpack_pivots UNUSED,
 DenseTensor* x_grad) {
 dev_ctx.template Alloc<T>(x_grad);
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void PoissonGradKernel(const Context& ctx,
-const DenseTensor& out_grad,
+const DenseTensor& out_grad UNUSED,
 DenseTensor* x_grad) {
 ctx.template Alloc<T>(x_grad);
 phi::funcs::SetConstant<Context, T> functor;
......
@@ -142,7 +142,7 @@ void PoolGradRawKernel(const Context& ctx,
 template <typename Context, typename T1, typename T2 = int>
 void MaxPoolWithIndexGradRawKernel(const Context& ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const DenseTensor& mask,
 const DenseTensor& dout,
 const std::vector<int>& kernel_size,
@@ -192,7 +192,7 @@ void Pool2dGradKernel(const Context& ctx,
 const IntArray& kernel_size,
 const std::vector<int>& strides,
 const std::vector<int>& paddings,
-bool ceil_mode,
+bool ceil_mode UNUSED,
 bool exclusive,
 const std::string& data_format,
 const std::string& pooling_type,
@@ -283,7 +283,7 @@ void Pool3dGradKernel(const Context& ctx,
 const std::vector<int>& kernel_size,
 const std::vector<int>& strides,
 const std::vector<int>& paddings,
-bool ceil_mode,
+bool ceil_mode UNUSED,
 bool exclusive,
 const std::string& data_format,
 const std::string& pooling_type,
......
@@ -228,7 +228,7 @@ void Pool2dKernel(const Context& ctx,
 const IntArray& kernel_size,
 const std::vector<int>& strides,
 const std::vector<int>& paddings,
-bool ceil_mode,
+bool ceil_mode UNUSED,
 bool exclusive,
 const std::string& data_format,
 const std::string& pooling_type,
@@ -279,7 +279,7 @@ void Pool3dKernel(const Context& ctx,
 const std::vector<int>& kernel_size,
 const std::vector<int>& strides,
 const std::vector<int>& paddings,
-bool ceil_mode,
+bool ceil_mode UNUSED,
 bool exclusive,
 const std::string& data_format,
 const std::string& pooling_type,
......
@@ -88,7 +88,7 @@ void QrGradKernel(const Context& ctx,
 auto m_gt_n_case = [](const Context& ctx,
 const DenseTensor& dQ,
 const DenseTensor& dR,
-const DenseTensor& A,
+const DenseTensor& A UNUSED,
 const DenseTensor& Q,
 const DenseTensor& R) -> DenseTensor {
 // Hai-Jun Liao, Jin-Guo Liu, Lei Wang, Tao Xiang (2019). Differentiable
......
@@ -32,14 +32,14 @@ namespace phi {
 namespace funcs {
 template <typename T>
-void RenormFunc(const phi::CPUContext& ctx,
+void RenormFunc(const phi::CPUContext& ctx UNUSED,
 const T* x_data,
 T* out_data,
 float p,
 int dim,
 float max_norm,
 int64_t dimension_each,
-phi::DDim& input_dims,
+const phi::DDim& input_dims,
 int64_t numel) {
 auto dim_size = input_dims.size();
 int64_t dim_divisor = 1;
@@ -83,7 +83,7 @@ void RenormFunc(const phi::CPUContext& ctx,
 }
 template <typename T>
-void RenormGradFunc(const phi::CPUContext& ctx,
+void RenormGradFunc(const phi::CPUContext& ctx UNUSED,
 const T* x_data,
 const T* dout_data,
 T* dx_data,
@@ -91,7 +91,7 @@ void RenormGradFunc(const phi::CPUContext& ctx,
 int dim,
 float max_norm,
 int64_t dimension_each,
-phi::DDim& input_dims,
+const phi::DDim& input_dims,
 int64_t numel) {
 auto dim_size = input_dims.size();
 int64_t dim_divisor = 1;
@@ -116,8 +116,9 @@ void RenormGradFunc(const phi::CPUContext& ctx,
 dim_power_sum[i] =
 std::pow(dim_value[i], (T)(-1.0 - 1.0 / p)) * -1 * max_norm;
 dim_value[i] = max_norm / temp;
-} else
+} else {
 dim_value[i] = 1.0;
+}
 }
 index = dim_index = 0;
 for (int64_t i = 0; i < numel; i++) {
@@ -271,7 +272,7 @@ void RenormFunc(const phi::GPUContext& ctx,
 int dim,
 float max_norm,
 int64_t dimension_each,
-phi::DDim& input_dims,
+const phi::DDim& input_dims,
 int64_t numel) {
 auto dim_size = input_dims.size();
 DenseTensor pow_value, dim_value;
@@ -307,7 +308,7 @@ void RenormGradFunc(const phi::GPUContext& ctx,
 int dim,
 float max_norm,
 int64_t dimension_each,
-phi::DDim& input_dims,
+const phi::DDim& input_dims,
 int64_t numel) {
 auto dim_size = input_dims.size();
 int64_t dim_divisor = 1, pre_mul = 1;
......
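
The renorm hunks above also change `phi::DDim& input_dims` to `const phi::DDim& input_dims`: the functors only read the dims, so a non-const reference both overstates what the function may do and refuses to bind to const or temporary arguments. Below is a minimal sketch of that difference, using `std::vector<int64_t>` as a hypothetical stand-in for `phi::DDim` so the example stays self-contained.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-in for phi::DDim, used only to keep the sketch
// self-contained; the real class lives in Paddle's core headers.
using Dims = std::vector<int64_t>;

// After the change the signature says "read-only" and accepts const objects
// and temporaries; the old `Dims&` form implied the callee might resize or
// rewrite the dims and could not bind to a const argument.
int64_t Numel(const Dims& input_dims) {
  int64_t numel = 1;
  for (int64_t d : input_dims) numel *= d;
  return numel;
}

int main() {
  const Dims dims = {2, 3, 4};
  // With a non-const `Dims&` parameter this call would not compile, because
  // a non-const lvalue reference cannot bind to a const object.
  return Numel(dims) == 24 ? 0 : 1;
}
```
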
@@ -225,7 +225,8 @@ void RmspropSparseKernel(const Context &ctx,
 const DenseTensor &moment,
 const DenseTensor &learning_rate,
 const paddle::optional<DenseTensor> &mean_grad_opt,
-const paddle::optional<DenseTensor> &master_param,
+const paddle::optional<DenseTensor> &master_param
+UNUSED,
 float epsilon_t,
 float decay_t,
 float momentum_t,
......
@@ -20,12 +20,12 @@ namespace phi {
 template <typename T, typename Context>
 void SequencePoolGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const paddle::optional<DenseTensor>& max_index,
 const DenseTensor& out_grad,
-bool is_test,
+bool is_test UNUSED,
 const std::string& pooltype,
-float pad_value,
+float pad_value UNUSED,
 DenseTensor* x_grad) {
 const phi::DenseTensor* index = nullptr;
 if (pooltype == "MAX") {
......
@@ -67,7 +67,7 @@ static DenseTensor Unsqueeze(const DenseTensor& x, int axis = 0) {
 template <typename T, typename Context>
 void SvdGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const DenseTensor& u,
 const DenseTensor& vh,
 const DenseTensor& s,
......
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void UnfoldGradKernel(const Context& ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const DenseTensor& out_grad,
 const std::vector<int>& kernel_sizes,
 const std::vector<int>& strides,
......
@@ -29,11 +29,11 @@ namespace phi {
 template <typename T, typename Context>
 void WarpctcGradKernel(const Context& dev_ctx,
-const DenseTensor& logits,
+const DenseTensor& logits UNUSED,
 const paddle::optional<DenseTensor>& logits_length,
 const DenseTensor& warpctcgrad,
 const DenseTensor& loss_grad,
-int blank,
+int blank UNUSED,
 bool norm_by_times,
 DenseTensor* logits_grad) {
 dev_ctx.template Alloc<T>(logits_grad);
......
@@ -233,7 +233,7 @@ void WarpctcKernel(const Context& dev_ctx,
 const paddle::optional<DenseTensor>& logits_length,
 const paddle::optional<DenseTensor>& labels_length,
 int blank,
-bool norm_by_times,
+bool norm_by_times UNUSED,
 DenseTensor* loss,
 DenseTensor* warpctcgrad) {
 size_t num_sequences, sequence_width, max_sequence_length;
......
@@ -25,12 +25,12 @@ namespace phi {
 template <typename T, typename Context>
 void WarprnntGradKernel(const Context& dev_ctx,
-const DenseTensor& input,
-const DenseTensor& input_lengths,
+const DenseTensor& input UNUSED,
+const DenseTensor& input_lengths UNUSED,
 const DenseTensor& warprnntgrad,
 const DenseTensor& loss_grad,
-int blank,
-float fastemit_lambda,
+int blank UNUSED,
+float fastemit_lambda UNUSED,
 DenseTensor* input_grad) {
 dev_ctx.template Alloc<T>(input_grad);
......
@@ -67,7 +67,7 @@ void OneHotRawKernel(const Context& dev_ctx,
 const DenseTensor& x,
 const Scalar& depth,
 DataType dtype,
-bool allow_out_of_range,
+bool allow_out_of_range UNUSED,
 DenseTensor* out) {
 auto depth_v = depth.to<int>();
 auto out_dims = out->dims();
......
@@ -26,7 +26,7 @@ void RandintWithSeedKernel(const Context& dev_ctx,
 int low,
 int high,
 const IntArray& shape,
-DataType dtype,
+DataType dtype UNUSED,
 int seed,
 DenseTensor* out) {
 out->Resize(phi::make_ddim(shape.GetData()));
......
@@ -217,7 +217,7 @@ void SwishGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void EluGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const DenseTensor& out,
 const DenseTensor& dout,
 float alpha,
......
@@ -372,12 +372,12 @@ void Conv2dTransposeKernel(const Context& dev_ctx,
 const DenseTensor& filter,
 const std::vector<int>& strides,
 const std::vector<int>& paddings,
-const std::vector<int>& output_padding,
-const IntArray& output_size,
+const std::vector<int>& output_padding UNUSED,
+const IntArray& output_size UNUSED,
 const std::string& padding_algorithm,
 int groups,
 const std::vector<int>& dilations,
-const std::string& data_format,
+const std::string& data_format UNUSED,
 DenseTensor* out) {
 PADDLE_ENFORCE_EQ(dev_ctx.GetPlace().GetType(),
 AllocationType::CPU,
......
@@ -20,9 +20,9 @@
 namespace phi {
 template <typename T, typename Context>
 void ExpandGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
 const DenseTensor& out_grad,
-const IntArray& shape,
+const IntArray& shape UNUSED,
 DenseTensor* in_grad) {
 const auto& onednn_engine = dev_ctx.GetEngine();
......
@@ -84,7 +84,7 @@ template <typename T>
 void ReduceSumForMatmulGradOutput(const OneDNNContext &dev_ctx,
 const DenseTensor *dx_tmp,
 DenseTensor *dx,
-const std::vector<int64_t> &dx_dims,
+const std::vector<int64_t> &dx_dims UNUSED,
 const std::vector<int64_t> &x_dims) {
 funcs::ReductionOneDNNHandler<T> handler(dnnl::algorithm::reduction_sum,
 0.0f,
......