Unverified commit 10758725, authored by Galaxy1458, committed by GitHub

test,test=develop (#53811)

Parent commit: c1f4005a
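Throughout this diff, parameters that the CPU-only stubs and specializations never read are annotated with the UNUSED macro so the build stays clean under -Wunused-parameter. The sketch below is a minimal, standalone illustration of the idea, assuming UNUSED expands to the GCC/Clang unused attribute; the actual macro definition lives in Paddle's own headers and may differ.

#include <stdexcept>

// Hypothetical definition for illustration only; Paddle defines its own UNUSED.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Mirrors the CPU stub kernels in this commit: the parameter is accepted to
// satisfy the interface but never read, so it is marked UNUSED to silence
// the unused-parameter warning.
void ComputeStub(int ctx UNUSED) {
  throw std::runtime_error("Do not support this op for cpu kernel now.");
}

int main() { return 0; }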
@@ -32,7 +32,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class AllToAllOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support alltoall for cpu kernel now."));
   }
...
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class CIdentityOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support c_identity for cpu kernel now."));
   }
...
@@ -30,7 +30,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class CReduceScatterOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unimplemented(
         "Unimplemented cpu kernel for CReduceScatterOp."));
   }
...
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class CSplitOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support c_split for cpu kernel now."));
   }
...
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T>
 void index_put_kernel(const int64_t N,
-                      const T* x,
+                      const T* x UNUSED,
                       const T* vals,
                       const int64_t** indices,
                       const phi::DDim& stride,
...
@@ -20,10 +20,10 @@
 namespace phi {
 template <typename T, typename Context>
-void ReduceScatterKernel(const Context& dev_ctx,
-                         const DenseTensor& x,
-                         int nranks,
-                         DenseTensor* out) {
+void ReduceScatterKernel(const Context& dev_ctx UNUSED,
+                         const DenseTensor& x UNUSED,
+                         int nranks UNUSED,
+                         DenseTensor* out UNUSED) {
   PADDLE_THROW(
       errors::Unimplemented("Unimplemented cpu kernel for CReduceScatterOp."));
 }
...
@@ -275,7 +275,7 @@ void RnnFunc(const Context& dev_ctx,
              DenseTensor* dropout_mask,
              int num_layers,
              int gate_num,
-             int input_size,
+             int input_size UNUSED,
              int hidden_size,
              bool is_bidirec,
              const std::string& cell_type,
...
@@ -398,12 +398,12 @@ struct LambBetaPowUpdateFunctor {
 template <typename MT>
 struct LambBetaPowUpdateFunctor<MT, /*NeedUpdateBetaPow=*/false> {
-  void SetBetaPows(const MT* beta1pow,
-                   const MT* beta2pow,
-                   MT* beta1pow_out,
-                   MT* beta2pow_out,
-                   MT beta1,
-                   MT beta2) {}
+  void SetBetaPows(const MT* beta1pow UNUSED,
+                   const MT* beta2pow UNUSED,
+                   MT* beta1pow_out UNUSED,
+                   MT* beta2pow_out UNUSED,
+                   MT beta1 UNUSED,
+                   MT beta2 UNUSED) {}
   HOSTDEVICE void UpdateBetaPow(size_t) const {}
 };
...
@@ -70,7 +70,9 @@ class RowwiseMean2D<phi::GPUContext, T> {
 template <typename T>
 class RowwiseMean2D<phi::CPUContext, T> {
  public:
-  RowwiseMean2D(int left, int right, const DeviceContext& dev_ctx) {}
+  RowwiseMean2D(int left UNUSED,
+                int right UNUSED,
+                const DeviceContext& dev_ctx UNUSED) {}
   void operator()(const phi::CPUContext& context,
                   const DenseTensor& input,
...
@@ -204,7 +204,7 @@ struct TensorSetConstantWithPlace
       : context_(context), tensor_(tensor), value_(value) {}
   template <typename Place>
-  void operator()(Place place) const {
+  void operator()(Place place UNUSED) const {
     set_constant_with_place<Place>(context_, tensor_, value_);
   }
...
@@ -73,7 +73,7 @@ class MaxPoolGrad {
  public:
   static constexpr bool use_x = true;
   HOSTDEVICE inline void compute(
-      const T& x, const T& y, const T& dy, T scale, T* dx) {
+      const T& x, const T& y, const T& dy, T scale UNUSED, T* dx) {
     *dx += dy * static_cast<T>(x == y);
   }
 };
...
@@ -190,7 +190,7 @@ template struct SelectedRowsAddTensor<phi::CPUContext, double>;
 template <typename T>
 struct SelectedRowsAddTo<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::SelectedRows& input1,
                   const int64_t input2_offset,
                   phi::SelectedRows* input2) {
...
@@ -97,7 +97,7 @@ static void fast_mem_init(void* dest,
 template <typename T>
 class PaddingLoDTensorFunctor<phi::CPUContext, T> {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& seq_tensor,
                   phi::DenseTensor* pad_tensor,
                   const phi::DenseTensor& pad_value,
@@ -157,7 +157,7 @@ class PaddingLoDTensorFunctor<phi::CPUContext, T> {
 template <typename T>
 class UnpaddingLoDTensorFunctor<phi::CPUContext, T> {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& pad_tensor,
                   phi::DenseTensor* seq_tensor,
                   int pad_seq_len = -1,
...
@@ -51,9 +51,9 @@ inline static size_t TotalSequenceLength(
 inline static void CheckDims(const phi::DDim& seq_tensor_dims,
                              const phi::DDim& pad_tensor_dims,
                              const phi::Vector<size_t>& seq_offset,
-                             int64_t padded_seq_len,
-                             int64_t step_width,
-                             const PadLayout& layout) {
+                             int64_t padded_seq_len UNUSED,
+                             int64_t step_width UNUSED,
+                             const PadLayout& layout UNUSED) {
   PADDLE_ENFORCE_EQ(
       static_cast<size_t>(seq_tensor_dims[0]),
       seq_offset.back(),
...
@@ -31,7 +31,9 @@ static const std::vector<T> &ToVector(const std::vector<T> &vec) {
 }
 template <typename T>
-static std::vector<T> ToVector(const T *x, size_t n, const phi::Place &place) {
+static std::vector<T> ToVector(const T *x,
+                               size_t n,
+                               const phi::Place &place UNUSED) {
 #ifdef __NVCC__
   if (place.GetType() == phi::AllocationType::GPU) {
     using CopyT = typename std::
...
@@ -370,7 +370,7 @@ void FusedMatmulKernel(const Context &dev_ctx,
                        const std::vector<int> &fused_transpose_Y,
                        const std::vector<int> &fused_reshape_Out,
                        const std::vector<int> &fused_transpose_Out,
-                       const std::string &mkldnn_data_type,
+                       const std::string &mkldnn_data_type UNUSED,
                        const float scale_x,
                        const float scale_y,
                        const float scale_in_eltwise,
...
@@ -277,7 +277,7 @@ bool IsContinuous(const Type &weight_list) {
 }
 template <typename T>
-void WeightToTensor(const Place &place,
+void WeightToTensor(const Place &place UNUSED,
                     gpuStream_t stream,
                     const std::vector<const DenseTensor *> &weight_list,
                     DenseTensor *weight) {
...
@@ -44,7 +44,7 @@ void TransposeGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void TransLayoutGradKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& out_grad,
                            const std::vector<int>& axis,
                            DenseTensor* x_grad) {
...
@@ -112,7 +112,7 @@ void SumCsrGradKernel(const Context& dev_ctx,
                       const SparseCsrTensor& x,
                       const SparseCsrTensor& dout,
                       const IntArray& axis,
-                      bool keep_dim,
+                      bool keep_dim UNUSED,
                       SparseCsrTensor* dx) {
   EmptyLikeCsrKernel<T, Context>(dev_ctx, x, dx);
   unsigned int n_dim = axis.size();
...