Unverified commit f7267412, authored by risemeup1, committed by GitHub

fix_gcc12_error (#52083)

* fix_gcc12_error

* fix gcc12 error

* fix gcc12 error
Parent b2bd74f7
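
The hunks below are one-line initialization or type changes. Judging from the commit title, they silence diagnostics that GCC 12 newly raises and that the build, treating warnings as errors, turns into hard failures. They appear to fall into three variants: raw pointers given a nullptr initializer, a layout enum given an UNDEFINED initializer, and direct indexing of framework::DDim replaced with a std::vector<int> built by phi::vectorize. A minimal sketch of each variant follows the corresponding hunks.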
@@ -1638,7 +1638,7 @@ void BrpcPsClient::PushSparseTaskConsume() {
   while (!task_queue->Empty() && merge_count < cur_meger_size) {
     ++merge_count;
-    SparseAsyncTask *task;
+    SparseAsyncTask *task = nullptr;
     task_queue->Get(task);
     task_list.push_back(std::shared_ptr<SparseAsyncTask>(task));
   }
@@ -1943,7 +1943,7 @@ void BrpcPsClient::PushDenseTaskConsume() {
       continue;
     }
     ++_async_call_num;
-    DenseAsyncTask *task;
+    DenseAsyncTask *task = nullptr;
     task_queue->Get(task);
     auto *accessor = GetTableAccessor(task->table_id());
     // set the request callback
......
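
A minimal sketch of the first variant, using hypothetical stand-ins (Task, Get, Drain) rather than Paddle's real types: when a pointer's only write happens through an out-parameter call whose definition, in the real build, sits in another translation unit, GCC 12's -Wmaybe-uninitialized can no longer prove the pointer is set before use. Initializing it to nullptr, as the two hunks above do, makes every path read a defined value.

```cpp
// Hypothetical reduction of the BrpcPsClient pattern; Task, Get, and Drain
// are stand-ins, not Paddle APIs.
#include <memory>
#include <vector>

struct Task { int table_id = 0; };

// In Paddle this is task_queue->Get(task); its definition lives in another
// translation unit, so the optimizer cannot see that it always writes `out`.
void Get(Task*& out) { out = new Task(); }

std::vector<std::shared_ptr<Task>> Drain() {
  std::vector<std::shared_ptr<Task>> task_list;
  Task* task = nullptr;  // the fix; previously just `Task *task;`
  Get(task);             // with an opaque Get, GCC 12 may report that `task`
                         // "may be used uninitialized" at the next line
  task_list.push_back(std::shared_ptr<Task>(task));
  return task_list;
}

int main() { return Drain().size() == 1 ? 0 : 1; }
```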
@@ -149,7 +149,7 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
     phi::DenseTensor *dst_lodtensor) {
   // calc dst type,layout,dim,lod and calc check dim
   proto::VarType::Type new_type = proto::VarType::FP32;
-  phi::DataLayout new_layout;
+  phi::DataLayout new_layout = phi::DataLayout::UNDEFINED;
   framework::DDim new_dim;
   LoD new_lod = src_lodtensors[0]->lod();
......
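
The FetchMergedLodTensor hunk is the second variant: new_layout is presumably assigned later only under conditions GCC 12 cannot prove exhaustive, so the eventual read is diagnosed as maybe-uninitialized; seeding it with phi::DataLayout::UNDEFINED gives every path a defined value. A sketch with a simplified stand-in enum, not Paddle's real definition:

```cpp
#include <vector>

// Simplified stand-in for phi::DataLayout.
enum class DataLayout { UNDEFINED = -1, kNHWC, kNCHW };

// When every assignment to `layout` is conditional, GCC 12 can flag the
// return as maybe-uninitialized; the sentinel initializer covers all paths.
DataLayout MergeLayouts(const std::vector<DataLayout>& srcs) {
  DataLayout layout = DataLayout::UNDEFINED;  // was: `DataLayout layout;`
  for (DataLayout l : srcs) {
    if (l != DataLayout::UNDEFINED) layout = l;
  }
  return layout;  // defined even when srcs is empty
}

int main() {
  return MergeLayouts({DataLayout::kNCHW}) == DataLayout::kNCHW ? 0 : 1;
}
```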
@@ -33,7 +33,7 @@ class CTCAlignKernel : public framework::OpKernel<T> {
     size_t blank = static_cast<size_t>(ctx.Attr<int>("blank"));
     bool merge_repeated = ctx.Attr<bool>("merge_repeated");
     T* output_data = output->mutable_data<T>(ctx.GetPlace());
-    auto input_dims = input->dims();
+    auto input_dims = phi::vectorize<int>(input->dims());
     const T* input_data = input->data<T>();
     // support tensor input, no lod information
......
@@ -26,7 +26,7 @@ class PolygonBoxTransformCPUKernel : public framework::OpKernel<T> {
         true,
         platform::errors::InvalidArgument("It must use CUDAPlace."));
     auto* in = ctx.Input<phi::DenseTensor>("Input");
-    auto in_dims = in->dims();
+    auto in_dims = phi::vectorize<int>(in->dims());
     const T* in_data = in->data<T>();
     auto* out = ctx.Output<phi::DenseTensor>("Output");
     T* out_data = out->mutable_data<T>(ctx.GetPlace());
......
@@ -403,7 +403,7 @@ class CPUROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
     auto transformed_width = ctx.Attr<int>("transformed_width");
     auto spatial_scale = ctx.Attr<float>("spatial_scale");
-    auto in_dims = in->dims();
+    auto in_dims = phi::vectorize<int>(in->dims());
     int batch_size = in_dims[0];
     int channels = in_dims[1];
     int in_height = in_dims[2];
......
@@ -35,7 +35,7 @@ class SequenceEnumerateKernel : public framework::OpKernel<T> {
         "Input(X) phi::DenseTensor of SequenceEnumerateOp does not contain "
         "LoD information."));
-    auto in_dims = in->dims();
+    auto in_dims = phi::vectorize<int>(in->dims());
     auto lod0 = in->lod()[0];
     PADDLE_ENFORCE_EQ(
         static_cast<uint64_t>(in_dims[0]),
......
@@ -60,7 +60,7 @@ void TDMSamplerInner(const framework::ExecutionContext &context,
   }
   VLOG(3) << "TDM: sample res length: " << sample_res_length;
-  auto travel_dim = travel_lod_tensor.dims();
+  auto travel_dim = phi::vectorize<int>(travel_lod_tensor.dims());
   auto total_sample_nums = input_ids_num * sample_res_length;
   // get all data
......
@@ -81,7 +81,7 @@ void RoiAlignGradKernel(const Context& dev_ctx,
                         int sampling_ratio,
                         bool aligned,
                         DenseTensor* dx) {
-  const auto& in_dims = x.dims();
+  const auto& in_dims = phi::vectorize<int>(x.dims());
   int channels = in_dims[1];
   int height = in_dims[2];
   int width = in_dims[3];
......
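
The remaining hunks (ctc_align, polygon_box_transform, roi_perspective_transform, sequence_enumerate, tdm_sampler, roi_align_grad) all make the third change: indexing framework::DDim directly is replaced by indexing a std::vector<int> produced by phi::vectorize. A plausible reading, sketched below with simplified stand-in types rather than Paddle's real ones: DDim holds its extents in a fixed-capacity inline array whose valid prefix is known only at runtime, and GCC 12's sharper bounds and initialization analysis can reject reads it cannot prove stay inside that prefix, whereas a vector sized to the actual rank raises no such doubt.

```cpp
// Simplified stand-ins: DDim and vectorize approximate framework::DDim and
// phi::vectorize<T>, but are not Paddle's real definitions.
#include <array>
#include <cstdint>
#include <vector>

struct DDim {
  std::array<int64_t, 9> d{};  // fixed capacity; only `rank` slots are valid
  int rank = 0;
  int64_t operator[](int i) const { return d[i]; }
};

template <typename T>
std::vector<T> vectorize(const DDim& dims) {
  std::vector<T> out(dims.rank);
  for (int i = 0; i < dims.rank; ++i) out[i] = static_cast<T>(dims[i]);
  return out;  // a buffer whose size equals the runtime rank
}

int main() {
  DDim dims;
  dims.d = {8, 3, 224, 224};  // NCHW extents; remaining slots stay zero
  dims.rank = 4;
  auto in_dims = vectorize<int>(dims);  // the pattern used across this commit
  int channels = in_dims[1];            // plain vector indexing, no DDim reads
  return channels == 3 ? 0 : 1;
}
```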