Unverified commit c1f4005a, authored by Galaxy1458, committed by GitHub

test,test=develop (#53843)

Parent c174aa22
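This commit annotates kernel parameters that are kept for interface compatibility but never read inside the function body with the UNUSED macro, silencing unused-parameter warnings without changing any signatures or call sites. Below is a minimal, hypothetical sketch of how such a macro is commonly defined and applied; the real definition lives in Paddle's own headers, and the SumDims example is invented purely for illustration.

// Hypothetical stand-in for the UNUSED macro added throughout this diff.
// On GCC/Clang it can expand to the "unused" attribute; elsewhere to nothing.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

#include <vector>

// A parameter kept for interface compatibility but never read in the body
// is annotated after its name, exactly as in the kernels below.
int SumDims(const std::vector<int>& dims,
            const std::vector<int>& strides UNUSED) {
  int total = 0;
  for (int d : dims) total += d;  // strides is deliberately ignored
  return total;
}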
@@ -53,12 +53,12 @@ template <bool is_multi_threads>
struct GetInputIndex {
void operator()(const std::vector<int>& lhs_dims,
const std::vector<int>& rhs_dims,
-const std::vector<int>& output_dims,
+const std::vector<int>& output_dims UNUSED,
const std::vector<int>& lhs_strides,
const std::vector<int>& rhs_strides,
const std::vector<int>& output_strides,
int output_idx,
-int* index_array,
+int* index_array UNUSED,
int* lhs_idx,
int* rhs_idx) {
int out_dims_size = output_strides.size();
......
@@ -28,7 +28,7 @@ namespace funcs {
template <class T>
class Vol2ColFunctor<phi::CPUContext, T> {
public:
-void operator()(const phi::CPUContext& context,
+void operator()(const phi::CPUContext& context UNUSED,
const phi::DenseTensor& vol,
const std::vector<int>& dilations,
const std::vector<int>& strides,
@@ -154,7 +154,7 @@ class Vol2ColFunctor<phi::CPUContext, T> {
template <class T>
class Col2VolFunctor<phi::CPUContext, T> {
public:
-void operator()(const phi::CPUContext& context,
+void operator()(const phi::CPUContext& context UNUSED,
const phi::DenseTensor& col,
const std::vector<int>& dilations,
const std::vector<int>& strides,
......
@@ -24,7 +24,7 @@ template <typename T, typename Context>
void FusedSoftplusKernel(const Context& dev_ctx,
const DenseTensor& x,
float beta,
-float threshold,
+float threshold UNUSED,
const std::string& fuse_activation,
const float fuse_alpha,
const float fuse_beta,
......
@@ -86,7 +86,7 @@ void RnnGradKernel(const Context &dev_ctx,
const std::vector<const DenseTensor *> &state_grad,
float dropout_prob,
bool is_bidirec,
-int input_size,
+int input_size UNUSED,
int hidden_size,
int num_layers,
const std::string &mode,
......
@@ -134,7 +134,7 @@ void RnnKernel(const Context &dev_ctx,
const paddle::optional<DenseTensor> &sequence_length,
float dropout_prob,
bool is_bidirec,
-int input_size,
+int input_size UNUSED,
int hidden_size,
int num_layers,
const std::string &mode,
......
@@ -91,12 +91,13 @@ void AdagradSparseKernel(const Context& ctx,
const SelectedRows& grad_t,
const DenseTensor& moment_t,
const DenseTensor& learning_rate,
-const paddle::optional<DenseTensor>& master_param,
+const paddle::optional<DenseTensor>& master_param
+    UNUSED,
float epsilon_t,
-bool multi_precision,
+bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* moment_out,
-DenseTensor* master_param_outs) {
+DenseTensor* master_param_outs UNUSED) {
auto* param_out_tensor = param_out;
auto* moment_out_tensor = moment_out;
......
@@ -28,15 +28,15 @@ void AdamaxKernel(const Context& dev_ctx,
const DenseTensor& moment,
const DenseTensor& inf_norm,
const DenseTensor& beta1_pow,
-const paddle::optional<DenseTensor>& master_param,
+const paddle::optional<DenseTensor>& master_param UNUSED,
float beta1,
float beta2,
float epsilon,
-bool multi_precision,
+bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* moment_out,
DenseTensor* inf_norm_out,
-DenseTensor* master_param_outs) {
+DenseTensor* master_param_outs UNUSED) {
dev_ctx.template Alloc<T>(param_out);
dev_ctx.template Alloc<T>(moment_out);
dev_ctx.template Alloc<T>(inf_norm_out);
......
@@ -76,10 +76,10 @@ inline HOSTDEVICE void Update(const FoundInfFlagT found_inf_data,
template <typename Context, typename T>
class LazyZeros {
public:
-void operator()(const DeviceContext& dev_ctx,
+void operator()(const DeviceContext& dev_ctx UNUSED,
-const bool* found_inf_data,
+const bool* found_inf_data UNUSED,
-const std::vector<const DenseTensor*>& xs,
+const std::vector<const DenseTensor*>& xs UNUSED,
-const std::vector<DenseTensor*>& outs) const {}
+const std::vector<DenseTensor*>& outs UNUSED) const {}
};
template <typename Context, typename T, bool IsFoundInfOnCPU>
......
@@ -50,9 +50,9 @@ void ImagGradKernel(const Context& dev_ctx,
template <typename T>
struct ComplexGradForRealFunctor {
-inline HOSTDEVICE T operator()(const T x,
+inline HOSTDEVICE T operator()(const T x UNUSED,
-const T y,
+const T y UNUSED,
-const phi::dtype::complex<T> out,
+const phi::dtype::complex<T> out UNUSED,
const phi::dtype::complex<T> dout) {
return dout.real;
}
@@ -60,9 +60,9 @@ struct ComplexGradForRealFunctor {
template <typename T>
struct ComplexGradForImagFunctor {
-inline HOSTDEVICE T operator()(const T x,
+inline HOSTDEVICE T operator()(const T x UNUSED,
-const T y,
+const T y UNUSED,
-const phi::dtype::complex<T> out,
+const phi::dtype::complex<T> out UNUSED,
const phi::dtype::complex<T> dout) {
return dout.imag;
}
......
@@ -306,8 +306,8 @@ void Conv2dTransposeGradKernel(const Context& ctx,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings,
-const std::vector<int>& output_padding,
+const std::vector<int>& output_padding UNUSED,
-const IntArray& output_size,
+const IntArray& output_size UNUSED,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
@@ -335,8 +335,8 @@ void Conv3dTransposeGradKernel(const Context& ctx,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings,
-const std::vector<int>& output_padding,
+const std::vector<int>& output_padding UNUSED,
-const std::vector<int>& output_size,
+const std::vector<int>& output_size UNUSED,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
@@ -227,8 +227,8 @@ void Conv2dTransposeKernel(const Context& ctx,
const DenseTensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
-const std::vector<int>& output_padding,
+const std::vector<int>& output_padding UNUSED,
-const IntArray& output_size,
+const IntArray& output_size UNUSED,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
@@ -252,8 +252,8 @@ void Conv3dTransposeKernel(const Context& ctx,
const DenseTensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
-const std::vector<int>& output_padding,
+const std::vector<int>& output_padding UNUSED,
-const std::vector<int>& output_size,
+const std::vector<int>& output_size UNUSED,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
@@ -127,9 +127,9 @@ struct DivGradDX {
template <typename T>
struct DivGradDX<phi::dtype::complex<T>> {
HOSTDEVICE phi::dtype::complex<T> operator()(
-phi::dtype::complex<T> x,
+phi::dtype::complex<T> x UNUSED,
phi::dtype::complex<T> y,
-phi::dtype::complex<T> out,
+phi::dtype::complex<T> out UNUSED,
phi::dtype::complex<T> dout) const {
phi::dtype::complex<T> y_conj(y.real, -y.imag);
return dout / y_conj;
@@ -146,7 +146,7 @@ struct DivGradDY {
template <typename T>
struct DivGradDY<phi::dtype::complex<T>> {
HOSTDEVICE phi::dtype::complex<T> operator()(
-phi::dtype::complex<T> x,
+phi::dtype::complex<T> x UNUSED,
phi::dtype::complex<T> y,
phi::dtype::complex<T> out,
phi::dtype::complex<T> dout) const {
@@ -383,9 +383,9 @@ struct MulGradDX<bool> {
template <typename T>
struct MulGradDX<phi::dtype::complex<T>> {
HOSTDEVICE phi::dtype::complex<T> operator()(
-phi::dtype::complex<T> x,
+phi::dtype::complex<T> x UNUSED,
phi::dtype::complex<T> y,
-phi::dtype::complex<T> out,
+phi::dtype::complex<T> out UNUSED,
phi::dtype::complex<T> dout) const {
phi::dtype::complex<T> y_conj(y.real, -y.imag);
return dout * y_conj;
@@ -420,8 +420,8 @@ template <typename T>
struct MulGradDY<phi::dtype::complex<T>> {
HOSTDEVICE phi::dtype::complex<T> operator()(
phi::dtype::complex<T> x,
-phi::dtype::complex<T> y,
+phi::dtype::complex<T> y UNUSED,
-phi::dtype::complex<T> out,
+phi::dtype::complex<T> out UNUSED,
phi::dtype::complex<T> dout) const {
phi::dtype::complex<T> x_conj(x.real, -x.imag);
return dout * x_conj;
......
@@ -81,7 +81,7 @@ void FFTC2RGradKernel(const Context& ctx,
const std::vector<int64_t>& axes,
const std::string& normalization,
bool forward,
-int64_t last_dim_size,
+int64_t last_dim_size UNUSED,
DenseTensor* x_grad) {
using C = phi::dtype::complex<T>;
ctx.template Alloc<C>(x_grad);
......
@@ -44,7 +44,7 @@ void FFTC2RKernel(const Context& ctx,
const std::vector<int64_t>& axes,
const std::string& normalization,
bool forward,
-int64_t last_dim_size,
+int64_t last_dim_size UNUSED,
DenseTensor* out) {
using R = typename T::value_type;  // get real type
ctx.template Alloc<R>(out);
......
@@ -25,7 +25,7 @@ namespace phi {
template <typename T, typename Context>
void FoldGradKernel(const Context& ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& out_grad,
const std::vector<int>& output_sizes,
const std::vector<int>& kernel_sizes,
......
@@ -36,7 +36,7 @@ struct KLDivLossBackward {
template <typename T, typename Context>
void KLDivLossGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& label,
const DenseTensor& d_out,
const std::string& reduction,
......
@@ -98,10 +98,10 @@ static void LerpGradFunction(const Context& ctx,
template <typename Context, typename T>
static void LerpGradFunctionZero(const Context& ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
-const DenseTensor& y,
+const DenseTensor& y UNUSED,
const DenseTensor& weight,
-const DenseTensor& out,
+const DenseTensor& out UNUSED,
const DenseTensor& out_grad,
DenseTensor* x_grad,
DenseTensor* y_grad) {
......
@@ -55,7 +55,7 @@ void LogsumexpGradKernel(const Context& dev_ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
const std::vector<int64_t>& axis,
-bool keepdim,
+bool keepdim UNUSED,
bool reduce_all,
DenseTensor* in_grad) {
dev_ctx.template Alloc<T>(in_grad);
......
@@ -28,7 +28,7 @@ void LUGradKernel(const Context& dev_ctx,
const DenseTensor& out,
const DenseTensor& pivots,
const DenseTensor& out_grad,
-bool pivot,
+bool pivot UNUSED,
DenseTensor* x_grad) {
dev_ctx.template Alloc<T>(x_grad);
......