Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
b23914c2
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
b23914c2
编写于
6月 01, 2022
作者:
A
Aganlengzi
提交者:
GitHub
6月 01, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[fix] split nanmedian fluid deps (#43135)
上级
ef79403e
变更
8
隐藏空白更改
内联
并排
Showing
8 changed files
with
147 additions
and
93 deletions
+147
-93
paddle/phi/kernels/cpu/nanmedian_grad_kernel.cc
paddle/phi/kernels/cpu/nanmedian_grad_kernel.cc
+2
-0
paddle/phi/kernels/cpu/nanmedian_kernel.cc
paddle/phi/kernels/cpu/nanmedian_kernel.cc
+2
-0
paddle/phi/kernels/gpu/nanmedian_grad_kernel.cu
paddle/phi/kernels/gpu/nanmedian_grad_kernel.cu
+3
-1
paddle/phi/kernels/gpu/nanmedian_kernel.cu
paddle/phi/kernels/gpu/nanmedian_kernel.cu
+3
-1
paddle/phi/kernels/impl/nanmedian_grad_kernel_impl.h
paddle/phi/kernels/impl/nanmedian_grad_kernel_impl.h
+66
-0
paddle/phi/kernels/impl/nanmedian_kernel_impl.h
paddle/phi/kernels/impl/nanmedian_kernel_impl.h
+69
-0
paddle/phi/kernels/nanmedian_grad_kernel.h
paddle/phi/kernels/nanmedian_grad_kernel.h
+1
-44
paddle/phi/kernels/nanmedian_kernel.h
paddle/phi/kernels/nanmedian_kernel.h
+1
-47
未找到文件。
paddle/phi/kernels/cpu/nanmedian_grad_kernel.cc
浏览文件 @
b23914c2
...
...
@@ -13,9 +13,11 @@
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/impl/nanmedian_grad_kernel_impl.h"
namespace
phi
{
...
...
paddle/phi/kernels/cpu/nanmedian_kernel.cc
浏览文件 @
b23914c2
...
...
@@ -13,8 +13,10 @@
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/nanmedian_kernel_impl.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace
phi
{
...
...
paddle/phi/kernels/gpu/nanmedian_grad_kernel.cu
浏览文件 @
b23914c2
...
...
@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/phi/kernels/impl/nanmedian_grad_kernel_impl.h"
namespace
phi
{
...
...
paddle/phi/kernels/gpu/nanmedian_kernel.cu
浏览文件 @
b23914c2
...
...
@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/phi/kernels/impl/nanmedian_kernel_impl.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace
phi
{
...
...
paddle/phi/kernels/impl/nanmedian_grad_kernel_impl.h
0 → 100644
浏览文件 @
b23914c2
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace
phi
{
// Inverse of the preprocessing transpose performed before the median
// computation: `input` holds the gradient laid out with the non-reduced axes
// first and the reduced axes packed at the end; this restores it to the
// original layout of `x`.
//
// dev_ctx:  device context used for the transpose compute.
// input:    gradient tensor in the transposed layout; resized in place.
// raw_axes: axes the median was taken over (may be negative).
// x:        output tensor receiving the gradient in the original layout.
template <typename T, typename Context>
void PostprocessMedianGradKernel(const Context& dev_ctx,
                                 DenseTensor* input,
                                 const IntArray& raw_axes,
                                 DenseTensor* x) {
  auto input_dim = input->dims();
  auto rank = input_dim.size();
  std::vector<int64_t> axes = raw_axes.GetData();
  // Normalize negative axes (e.g. -1) into [0, rank).
  for (auto& axis : axes) {
    if (axis < 0) {
      axis += rank;
    }
  }

  // `trans_back[i]` is the position axis `i` was moved to by the forward
  // permutation; `reshape_back` rebuilds the pre-flatten shape.
  std::vector<int> trans_back(rank);
  std::vector<int> reshape_back;
  reshape_back.reserve(rank);
  int offset = 0;
  // The non-reduced axes occupied the leading positions ...
  for (int64_t i = 0; i < rank; ++i) {
    if (std::find(axes.begin(), axes.end(), i) == axes.end()) {
      reshape_back.push_back(input_dim[i]);
      trans_back[i] = offset;
      offset += 1;
    }
  }
  // ... followed by the reduced axes, which were packed at the end.
  for (int64_t i = 0; i < rank; ++i) {
    if (std::find(axes.begin(), axes.end(), i) != axes.end()) {
      trans_back[i] = offset;
      reshape_back.push_back(input_dim[i]);
      offset += 1;
    }
  }

  input->Resize(make_ddim(reshape_back));
  funcs::TransCompute<Context, T>(
      static_cast<int>(trans_back.size()), dev_ctx, *input, x, trans_back);
}
}
// namespace phi
paddle/phi/kernels/impl/nanmedian_kernel_impl.h
0 → 100644
浏览文件 @
b23914c2
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace
phi
{
// Transposes `input` so that the axes the median is taken over become the
// trailing dimensions, then flattens those axes into a single last dimension.
// The result is written to `x`, whose final shape is
// [non-reduced dims..., product(reduced dims)].
//
// dev_ctx:  device context used for allocation and the transpose compute.
// input:    source tensor in its original layout.
// raw_axes: axes the median is taken over (may be negative).
// x:        output tensor; resized and allocated here.
template <typename T, typename Context>
void PreprocessMedianKernel(const Context& dev_ctx,
                            const DenseTensor& input,
                            const IntArray& raw_axes,
                            DenseTensor* x) {
  auto input_dim = input.dims();
  auto rank = input_dim.size();
  std::vector<int64_t> axes = raw_axes.GetData();
  // Normalize negative axes (e.g. -1) into [0, rank).
  for (auto& axis : axes) {
    if (axis < 0) {
      axis += rank;
    }
  }

  std::vector<int> perm;
  std::vector<int64_t> reshape;
  perm.reserve(rank);
  // Keep the non-reduced axes first, preserving their order and extents.
  for (int64_t i = 0; i < rank; ++i) {
    if (std::find(axes.begin(), axes.end(), i) == axes.end()) {
      perm.push_back(i);
      reshape.push_back(input_dim[i]);
    }
  }
  // Append the reduced axes and accumulate their total element count so
  // they can be collapsed into one trailing dimension.
  int64_t post_numel = 1;
  for (int64_t i = 0; i < rank; ++i) {
    if (std::find(axes.begin(), axes.end(), i) != axes.end()) {
      perm.push_back(i);
      post_numel *= input_dim[i];
    }
  }
  reshape.push_back(post_numel);

  // Apply the permutation, then flatten the trailing (reduced) axes.
  DDim trans_dim(input_dim);
  int ndims = static_cast<int>(perm.size());
  for (int i = 0; i < ndims; ++i) {
    trans_dim[i] = input_dim[perm[i]];
  }
  x->Resize(trans_dim);
  dev_ctx.template Alloc<T>(x);
  funcs::TransCompute<Context, T>(ndims, dev_ctx, input, x, perm);
  x->Resize(make_ddim(reshape));
}
}
// namespace phi
paddle/phi/kernels/nanmedian_grad_kernel.h
浏览文件 @
b23914c2
...
...
@@ -13,55 +13,12 @@
// limitations under the License.
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace
phi
{
// Undoes the axis permutation applied before computing the median so that
// the gradient held in `input` is written to `x` in the original layout.
template <typename T, typename Context>
void PostprocessMedianGradKernel(const Context& dev_ctx,
                                 DenseTensor* input,
                                 const IntArray& raw_axes,
                                 DenseTensor* x) {
  auto dims = input->dims();
  auto ndim = dims.size();
  std::vector<int64_t> axes = raw_axes.GetData();
  // Map negative axis indices into [0, ndim).
  for (size_t k = 0; k < axes.size(); ++k) {
    if (axes[k] < 0) {
      axes[k] += ndim;
    }
  }
  auto is_reduced_axis = [&axes](int64_t d) {
    return std::find(axes.begin(), axes.end(), d) != axes.end();
  };

  std::vector<int> inverse_perm;
  std::vector<int> restored_shape;
  inverse_perm.reserve(ndim);
  inverse_perm.resize(ndim);
  int slot = 0;
  // Pass 1: non-reduced axes were moved to the leading positions.
  for (int64_t d = 0; d < ndim; ++d) {
    if (!is_reduced_axis(d)) {
      restored_shape.push_back(dims[d]);
      inverse_perm[d] = slot;
      ++slot;
    }
  }
  // Pass 2: reduced axes were packed at the end.
  for (int64_t d = 0; d < ndim; ++d) {
    if (is_reduced_axis(d)) {
      inverse_perm[d] = slot;
      restored_shape.push_back(dims[d]);
      ++slot;
    }
  }

  input->Resize(make_ddim(restored_shape));
  funcs::TransCompute<Context, T>(static_cast<int>(inverse_perm.size()),
                                  dev_ctx,
                                  *input,
                                  x,
                                  inverse_perm);
}
template
<
typename
T
,
typename
Context
>
void
NanmedianGradKernel
(
const
Context
&
dev_ctx
,
const
DenseTensor
&
x
,
...
...
paddle/phi/kernels/nanmedian_kernel.h
浏览文件 @
b23914c2
...
...
@@ -13,58 +13,12 @@
// limitations under the License.
#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace
phi
{
template
<
typename
T
,
typename
Context
>
void
PreprocessMedianKernel
(
const
Context
&
dev_ctx
,
const
DenseTensor
&
input
,
const
IntArray
&
raw_axes
,
DenseTensor
*
x
)
{
auto
input_dim
=
input
.
dims
();
auto
rank
=
input_dim
.
size
();
std
::
vector
<
int
>
perm
;
std
::
vector
<
int64_t
>
reshape
;
std
::
vector
<
int64_t
>
axes
=
raw_axes
.
GetData
();
int64_t
axes_size
=
static_cast
<
int
>
(
axes
.
size
());
for
(
int64_t
i
=
0
;
i
<
axes_size
;
i
++
)
{
if
(
axes
[
i
]
<
0
)
{
axes
[
i
]
+=
rank
;
}
}
for
(
int64_t
i
=
0
;
i
<
rank
;
i
++
)
{
if
(
std
::
find
(
axes
.
begin
(),
axes
.
end
(),
i
)
==
axes
.
end
())
{
perm
.
push_back
(
i
);
reshape
.
push_back
(
input_dim
[
i
]);
}
}
int64_t
post_numel
=
1
;
for
(
int64_t
i
=
0
;
i
<
rank
;
i
++
)
{
if
(
std
::
find
(
axes
.
begin
(),
axes
.
end
(),
i
)
!=
axes
.
end
())
{
perm
.
push_back
(
i
);
post_numel
*=
input_dim
[
i
];
}
}
reshape
.
push_back
(
post_numel
);
DDim
trans_dim
(
input_dim
);
int
ndims
=
perm
.
size
();
for
(
int
i
=
0
;
i
<
ndims
;
i
++
)
{
trans_dim
[
i
]
=
input_dim
[
perm
[
i
]];
}
x
->
Resize
(
trans_dim
);
dev_ctx
.
template
Alloc
<
T
>(
x
);
funcs
::
TransCompute
<
Context
,
T
>
(
ndims
,
dev_ctx
,
input
,
x
,
perm
);
x
->
Resize
(
make_ddim
(
reshape
));
}
template
<
typename
T
,
typename
Context
>
void
NanmedianKernel
(
const
Context
&
dev_ctx
,
const
DenseTensor
&
x
,
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录