BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit affe25b7 (unverified)
Authored Jun 13, 2022 by Chenxiao Niu; committed via GitHub on Jun 13, 2022

add mlu interp_v2(nearest&bilinear). (#43383)

Parent: 31ddaae2
Showing 4 changed files with 528 additions and 5 deletions (+528, -5)
paddle/fluid/operators/interpolate_op.h           +4    -2
paddle/fluid/operators/interpolate_v2_op_mlu.cc   +488  -0
paddle/fluid/operators/mlu/mlu_baseop.cc          +3    -3
paddle/fluid/operators/mlu/mlu_baseop.h           +33   -0
paddle/fluid/operators/interpolate_op.h

@@ -38,7 +38,8 @@ inline std::vector<int> get_new_shape(
             "The shape of dimension tensor should be [1],"
             "but received d%.",
             tensor->dims()));
-    if (platform::is_gpu_place(tensor->place())) {
+    if (platform::is_gpu_place(tensor->place()) ||
+        platform::is_mlu_place(tensor->place())) {
       framework::Tensor temp;
       paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp);
       vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
@@ -55,7 +56,8 @@ inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
   std::vector<T> vec_new_data;
   auto* new_data = new_data_tensor->data<T>();
   framework::Tensor cpu_starts_tensor;
-  if (platform::is_gpu_place(new_data_tensor->place())) {
+  if (platform::is_gpu_place(new_data_tensor->place()) ||
+      platform::is_mlu_place(new_data_tensor->place())) {
     paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                       &cpu_starts_tensor);
     new_data = cpu_starts_tensor.data<T>();
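Both hunks extend an existing GPU-only guard: size/scale tensors that live in device memory cannot be dereferenced from host code, so get_new_shape() and get_new_data_from_tensor() first copy them to the CPU with TensorCopySync; this commit makes MLU-resident tensors take the same path. A hedged usage sketch follows — the "SizeTensor" input name matches the interpolate_v2 op definition, but the surrounding kernel variables (ctx, out_h, out_w) are illustrative and not taken from this commit:

// Illustrative fragment from inside an interpolate kernel's Compute().
// get_new_shape() now round-trips MLU-resident size tensors through
// TensorCopySync(..., CPUPlace(), ...) before reading their values.
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
  // Each entry is a 1-D tensor holding one target output dimension.
  std::vector<int> new_size = get_new_shape(list_new_size_tensor);
  out_h = new_size[0];
  out_w = new_size[1];
}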
paddle/fluid/operators/interpolate_v2_op_mlu.cc (new file, mode 100644)

This diff is collapsed in the web view; the new kernel file accounts for the 488 added lines listed in the summary above.
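Since the new file is collapsed here, the following is a heavily hedged skeleton of the shape such an MLU interpolate kernel typically takes: translate the interp_method attribute with the helper added in mlu_baseop.h below, build CNNL tensor descriptors, and dispatch to an MLUCnnl wrapper, then register via REGISTER_OP_MLU_KERNEL. The class name and the MLUCnnl::Interp forward call are assumptions for illustration; they are not guaranteed to match the collapsed 488-line implementation.

// Hypothetical skeleton only -- not the code from this commit.
#include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"

namespace paddle {
namespace operators {

template <typename T>
class InterpolateV2MLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<framework::Tensor>("X");
    auto* output = ctx.Output<framework::Tensor>("Out");
    // Output shape handling (OutSize / Scale / SizeTensor inputs) and any
    // NCHW<->NHWC transposition are omitted for brevity.
    output->mutable_data<T>(ctx.GetPlace());

    // "nearest" or "bilinear" -> CNNL enum, via the map added in mlu_baseop.h.
    cnnlInterpMode_t mode =
        GetMLUCnnlInterpMode(ctx.Attr<std::string>("interp_method"));

    MLUCnnlTensorDesc input_desc(*input, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    MLUCnnlTensorDesc output_desc(*output, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    // MLUCnnl::Interp is assumed here as the forward counterpart of the
    // InterpBackward wrapper touched in mlu_baseop.cc below.
    MLUCnnl::Interp(ctx, mode, ctx.Attr<bool>("align_corners"),
                    /*half_pixel_centers=*/false, input_desc.get(),
                    GetBasePtr(input), output_desc.get(), GetBasePtr(output));
  }
};

}  // namespace operators
}  // namespace paddle

// float16 and grad-kernel registrations omitted in this sketch.
REGISTER_OP_MLU_KERNEL(nearest_interp_v2,
                       paddle::operators::InterpolateV2MLUKernel<float>);
REGISTER_OP_MLU_KERNEL(bilinear_interp_v2,
                       paddle::operators::InterpolateV2MLUKernel<float>);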
paddle/fluid/operators/mlu/mlu_baseop.cc

@@ -1925,9 +1925,9 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
                              const cnnlTensorDescriptor_t output_desc, void* output) {
   cnnlHandle_t handle = GetHandleFromCTX(ctx);
   PADDLE_ENFORCE_MLU_SUCCESS(
-      cnnlInterpBackward_v2(
+      cnnlInterpBackward(
           handle, align_corners, half_pixel_centers, mode,
-          NULL, true, input_desc,
+          input_desc,
           input, output_desc, output));
 }
 /* static */ void MLUCnnl::Cast(const ExecutionContext& ctx,
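This hunk swaps the CNNL call inside MLUCnnl::InterpBackward from cnnlInterpBackward_v2 (which took two extra arguments ahead of the tensor descriptors) to cnnlInterpBackward with the plain descriptor/data argument list. A hedged fragment of how a grad kernel might drive this wrapper, using the backward-mode lookup added in mlu_baseop.h below; the wrapper's parameter order is inferred from the call site above and the hypothetical names (output_grad, input_grad, align_corners, half_pixel_centers) are illustrative only:

// Illustrative fragment from inside a hypothetical *_grad MLU kernel.
cnnlInterpBackwardMode_t bwd_mode =
    GetMLUCnnlInterpBackwardMode(ctx.Attr<std::string>("interp_method"));

MLUCnnlTensorDesc dout_desc(*output_grad, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
MLUCnnlTensorDesc dx_desc(*input_grad, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());

// The wrapper forwards mode/align_corners/half_pixel_centers together with
// the CNNL handle to cnnlInterpBackward, as shown in the hunk above.
MLUCnnl::InterpBackward(ctx, bwd_mode, align_corners, half_pixel_centers,
                        dout_desc.get(), GetBasePtr(output_grad),
                        dx_desc.get(), GetBasePtr(input_grad));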
paddle/fluid/operators/mlu/mlu_baseop.h

@@ -41,6 +41,20 @@ const std::map<std::string, cnnlReduceOp_t> MLUReduceOpMap = {
     {"reduce_prod", CNNL_REDUCE_MUL},
 };

+const std::map<std::string, cnnlInterpMode_t> MLUInterpModeMap = {
+    {"bilinear", CNNL_INTERP_BILINEAR},
+    {"nearest", CNNL_INTERP_NEAREST},
+    {"linear", CNNL_INTERP_LINEAR},
+    {"trilinear", CNNL_INTERP_TRILINEAR},
+    {"bicubic", CNNL_INTERP_BICUBIC}};
+
+const std::map<std::string, cnnlInterpBackwardMode_t> MLUInterpBackwardModeMap =
+    {{"bilinear", CNNL_INTERP_BACKWARD_BILINEAR},
+     {"nearest", CNNL_INTERP_BACKWARD_NEAREST},
+     {"linear", CNNL_INTERP_BACKWARD_LINEAR},
+     {"trilinear", CNNL_INTERP_BACKWARD_TRILINEAR},
+     {"bicubic", CNNL_INTERP_BACKWARD_BICUBIC}};
+
 inline cnnlReduceOp_t GetMLUCnnlReduceOp(const std::string reduce_name) {
   auto iter = MLUReduceOpMap.find(reduce_name);
   if (iter != MLUReduceOpMap.end()) {
@@ -50,6 +64,25 @@ inline cnnlReduceOp_t GetMLUCnnlReduceOp(const std::string reduce_name) {
       "Not support reduce op type of MLU Device: %s", reduce_name));
 }
+
+inline cnnlInterpMode_t GetMLUCnnlInterpMode(const std::string interp_mode) {
+  auto iter = MLUInterpModeMap.find(interp_mode);
+  if (iter != MLUInterpModeMap.end()) {
+    return iter->second;
+  }
+  PADDLE_THROW(platform::errors::InvalidArgument(
+      "Not support interp mode of MLU Device: %s", interp_mode));
+}
+
+inline cnnlInterpBackwardMode_t GetMLUCnnlInterpBackwardMode(
+    const std::string interp_mode) {
+  auto iter = MLUInterpBackwardModeMap.find(interp_mode);
+  if (iter != MLUInterpBackwardModeMap.end()) {
+    return iter->second;
+  }
+  PADDLE_THROW(platform::errors::InvalidArgument(
+      "Not support interp mode of MLU Device: %s", interp_mode));
+}

 inline const void* GetBasePtr(const Tensor* t) { return t->data(); }

 inline void* GetBasePtr(Tensor* t) { return t->data(); }
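The new lookup helpers are the bridge between the op's string interp_method attribute and the CNNL enums. A minimal sketch of their behavior, assuming nothing beyond what the hunk above declares (any file that includes mlu_baseop.h can call them):

// Sketch: mapping interpolate_v2's "interp_method" attribute to CNNL enums.
cnnlInterpMode_t fwd = GetMLUCnnlInterpMode("bilinear");
// fwd == CNNL_INTERP_BILINEAR

cnnlInterpBackwardMode_t bwd = GetMLUCnnlInterpBackwardMode("nearest");
// bwd == CNNL_INTERP_BACKWARD_NEAREST

// A mode missing from the maps (e.g. "area") triggers
// PADDLE_THROW(platform::errors::InvalidArgument(
//     "Not support interp mode of MLU Device: %s", ...)).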