Commit 399047d7 (unverified)
Authored by YuanRisheng on Nov 01, 2022; committed via GitHub on Nov 01, 2022.
[PHI]Standardise some C++ API (Part2) (#47510)
* standard_api
* add hardtanh
Parent: 957fbb02
Showing 84 changed files with 1,437 additions and 1,129 deletions.
paddle/fluid/framework/new_executor/standalone_executor_test.cc  +2 -2
paddle/fluid/operators/detection/yolov3_loss_op.cc  +2 -2
paddle/fluid/operators/hierarchical_sigmoid_op.cc  +1 -1
paddle/fluid/operators/tril_triu_op.cc  +1 -1
paddle/fluid/operators/where_index_op.cc  +1 -1
paddle/phi/api/yaml/legacy_backward.yaml  +23 -23
paddle/phi/api/yaml/legacy_ops.yaml  +46 -46
paddle/phi/infermeta/backward.cc  +18 -18
paddle/phi/infermeta/backward.h  +18 -18
paddle/phi/infermeta/multiary.cc  +30 -30
paddle/phi/infermeta/multiary.h  +30 -30
paddle/phi/infermeta/ternary.cc  +58 -58
paddle/phi/infermeta/ternary.h  +8 -8
paddle/phi/infermeta/unary.cc  +277 -277
paddle/phi/infermeta/unary.h  +29 -29
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc  +0 -71
paddle/phi/kernels/cpu/hsigmoid_loss_grad.h  +20 -21
paddle/phi/kernels/cpu/hsigmoid_loss_grad_kernel.cc  +71 -0
paddle/phi/kernels/cpu/hsigmoid_loss_kernel.cc  +20 -24
paddle/phi/kernels/cpu/nonzero_kernel.cc  +6 -6
paddle/phi/kernels/cpu/prod_grad_kernel.cc  +3 -3
paddle/phi/kernels/cpu/prod_kernel.cc  +1 -1
paddle/phi/kernels/cpu/tril_grad_kernel.cc  +3 -3
paddle/phi/kernels/cpu/tril_kernel.cc  +3 -3
paddle/phi/kernels/cpu/uniform_inplace_grad_kernel.cc  +12 -12
paddle/phi/kernels/cpu/uniform_inplace_kernel.cc  +12 -12
paddle/phi/kernels/cpu/uniform_kernel.cc  +13 -13
paddle/phi/kernels/cpu/yolo_loss_functor.h  +0 -0
paddle/phi/kernels/cpu/yolo_loss_grad_kernel.cc  +23 -27
paddle/phi/kernels/cpu/yolo_loss_kernel.cc  +18 -18
paddle/phi/kernels/gpu/lstsq_kernel.cu  +3 -3
paddle/phi/kernels/gpu/nonzero_kernel.cu  +6 -6
paddle/phi/kernels/gpu/prod_grad_kernel.cu  +3 -3
paddle/phi/kernels/gpu/qr_kernel.cu  +3 -3
paddle/phi/kernels/gpu/tril_grad_kernel.cu  +3 -3
paddle/phi/kernels/gpu/tril_kernel.cu  +3 -3
paddle/phi/kernels/gpu/uniform_inplace_grad_kernel.cu  +12 -12
paddle/phi/kernels/gpu/uniform_inplace_kernel.cu  +12 -12
paddle/phi/kernels/gpu/uniform_kernel.cu  +13 -13
paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h  +0 -42
paddle/phi/kernels/hierarchical_sigmoid_kernel.h  +0 -40
paddle/phi/kernels/hsigmoid_loss_grad_kernel.h  +42 -0
paddle/phi/kernels/hsigmoid_loss_kernel.h  +40 -0
paddle/phi/kernels/impl/prod_grad_kernel_impl.h  +9 -9
paddle/phi/kernels/impl/qr_grad_kernel_impl.h  +3 -3
paddle/phi/kernels/impl/tril_grad_kernel_impl.h  +6 -6
paddle/phi/kernels/impl/tril_kernel_impl.h  +6 -6
paddle/phi/kernels/kps/prod_kernel.cu  +1 -1
paddle/phi/kernels/nonzero_kernel.h  +3 -3
paddle/phi/kernels/prod_grad_kernel.h  +8 -8
paddle/phi/kernels/prod_kernel.cc  +1 -1
paddle/phi/kernels/prod_kernel.h  +0 -0
paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.cc  +99 -0
paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h  +45 -0
paddle/phi/kernels/selected_rows/uniform_kernel.cc  +96 -0
paddle/phi/kernels/selected_rows/uniform_kernel.h  +17 -17
paddle/phi/kernels/tril_grad_kernel.h  +5 -5
paddle/phi/kernels/tril_kernel.h  +11 -11
paddle/phi/kernels/uniform_inplace_grad_kernel.h  +9 -9
paddle/phi/kernels/uniform_inplace_kernel.h  +9 -9
paddle/phi/kernels/uniform_kernel.cc  +14 -16
paddle/phi/kernels/uniform_kernel.h  +17 -17
paddle/phi/kernels/xpu/nonzero_kernel.cc  +5 -5
paddle/phi/kernels/xpu/prod_kernel.cc  +1 -1
paddle/phi/kernels/xpu/tril_grad_kernel.cc  +7 -7
paddle/phi/kernels/xpu/tril_kernel.cc  +7 -8
paddle/phi/kernels/xpu/uniform_kernel.cc  +13 -13
paddle/phi/kernels/yolo_loss_grad_kernel.h  +42 -0
paddle/phi/kernels/yolo_loss_kernel.h  +38 -0
paddle/phi/kernels/yolov3_loss_kernel.h  +0 -38
paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc  +6 -3
paddle/phi/ops/compat/tril_triu_sig.cc  +5 -2
paddle/phi/ops/compat/uniform_random_inplace_sig.cc  +4 -2
paddle/phi/ops/compat/uniform_random_sig.cc  +15 -15
paddle/phi/ops/compat/where_index_sig.cc  +27 -0
paddle/phi/ops/compat/yolov3_loss_sig.cc  +5 -2
python/paddle/fluid/initializer.py  +3 -3
python/paddle/fluid/layers/nn.py  +3 -3
python/paddle/nn/functional/loss.py  +1 -1
python/paddle/tensor/creation.py  +2 -2
python/paddle/tensor/math.py  +1 -1
python/paddle/tensor/random.py  +2 -2
python/paddle/tensor/search.py  +1 -1
python/paddle/vision/ops.py  +1 -1
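Taken together, the list above amounts to one systematic rename of PHI kernel, InferMeta, and YAML op names, plus the file moves that follow from it. A small self-contained C++ sketch of the old-to-new mapping, compiled from the file names and diffs on this page (illustrative data, not a Paddle API):

#include <map>
#include <string>

// Old public op/kernel name -> standardised name introduced by this commit.
const std::map<std::string, std::string> kStandardisedNames = {
    {"hierarchical_sigmoid", "hsigmoid_loss"},
    {"where_index", "nonzero"},
    {"tril_triu", "tril"},
    {"reduce_prod", "prod"},
    {"uniform_random", "uniform"},
    {"uniform_random_inplace", "uniform_inplace"},
    {"yolov3_loss", "yolo_loss"},
};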
paddle/fluid/framework/new_executor/standalone_executor_test.cc
@@ -63,8 +63,8 @@ USE_OP_ITSELF(memcpy_d2h);
 USE_OP_ITSELF(fetch_v2);
 PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random_raw, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform_raw, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(transpose, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(reshape, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(split, GPU, ALL_LAYOUT);
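Tests reference kernels registered in other translation units through PD_DECLARE_KERNEL, so each declaration must track the kernel's registered name. A minimal hedged sketch of the pattern, assuming only the macro behaviour visible in the diff above:

#include "paddle/phi/core/kernel_registry.h"

// Declares that the GPU "uniform" kernel (registered elsewhere via
// PD_REGISTER_KERNEL) is linked into this binary. After this commit the
// old name "uniform_random" no longer resolves, which is why the test
// file above updates its declarations.
PD_DECLARE_KERNEL(uniform, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(uniform_raw, GPU, ALL_LAYOUT);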
paddle/fluid/operators/detection/yolov3_loss_op.cc
@@ -218,10 +218,10 @@ class Yolov3LossGradMaker : public framework::SingleGradOpMaker<T> {
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss,
                             Yolov3LossInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossInferMeta));
+                            PD_INFER_META(phi::YoloLossInferMeta));
 DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss_grad,
                             Yolov3LossGradInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossGradInferMeta));
+                            PD_INFER_META(phi::YoloLossGradInferMeta));
 REGISTER_OPERATOR(yolov3_loss,
                   ops::Yolov3LossOp,
                   ops::Yolov3LossOpMaker,
paddle/fluid/operators/hierarchical_sigmoid_op.cc
@@ -259,7 +259,7 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(hierarchical_sigmoid,
                             HierarchicalSigmoidInferShapeFunctor,
-                            PD_INFER_META(phi::HierarchicalSigmoidInferMeta));
+                            PD_INFER_META(phi::HSigmoidLossInferMeta));
 REGISTER_OPERATOR(hierarchical_sigmoid,
                   ops::HierarchicalSigmoidOp,
                   ops::HierarchicalSigmoidOpMaker<int>,
paddle/fluid/operators/tril_triu_op.cc
@@ -93,7 +93,7 @@ namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(tril_triu,
                             TrilTriuInferShapeFunctor,
-                            PD_INFER_META(phi::TrilTriuInferMeta));
+                            PD_INFER_META(phi::TrilInferMeta));
 REGISTER_OPERATOR(tril_triu,
                   ops::TrilTriuOp,
                   ops::TrilTriuOpMaker,
paddle/fluid/operators/where_index_op.cc
@@ -48,7 +48,7 @@ class WhereIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(where_index,
                             WhereIndexInferShapeFunctor,
-                            PD_INFER_META(phi::WhereIndexInferMeta));
+                            PD_INFER_META(phi::NonZeroInferMeta));
 REGISTER_OPERATOR(
     where_index,
     ops::WhereIndexOp,
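The legacy fluid op keeps its name (where_index) while the PHI side moves to nonzero; the bridge lives in the new compat file paddle/phi/ops/compat/where_index_sig.cc (+27 -0 in the file list), whose contents are not reproduced in this portion of the page. A hedged sketch of the usual shape of such a mapping, assuming the standard PHI compat macros:

#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

// Maps the legacy operator's inputs/attrs/outputs onto the renamed PHI
// kernel signature ("nonzero"). Names follow the common pattern for
// sig.cc files; the actual file may differ in detail.
KernelSignature WhereIndexOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("nonzero", {"Condition"}, {}, {"Out"});
}

}  // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(where_index, nonzero);
PD_REGISTER_ARG_MAPPING_FN(where_index, phi::WhereIndexOpArgumentMapping);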
paddle/phi/api/yaml/legacy_backward.yaml
@@ -791,8 +791,8 @@
     func : hard_tanh_grad
   inplace : (out_grad -> x_grad)
 
-- backward_op : hierarchical_sigmoid_grad
-  forward : hierarchical_sigmoid (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
+- backward_op : hsigmoid_loss_grad
+  forward : hsigmoid_loss (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
   args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
   output : Tensor(x_grad), Tensor(w_grad), Tensor(bias_grad)
   infer_meta :
@@ -800,7 +800,7 @@
     param : [x, w, bias]
   optional : path, code, bias
   kernel :
-    func : hierarchical_sigmoid_grad
+    func : hsigmoid_loss_grad
 
 - backward_op : huber_loss_grad
   forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
@@ -1477,6 +1477,16 @@
   kernel :
     func : prelu_grad
 
+- backward_op : prod_grad
+  forward : prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : prod_grad
+
 - backward_op : psroi_pool_grad
   forward : psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
   args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
@@ -1516,16 +1526,6 @@
   output : Tensor(x_grad)
   invoke : real_grad_impl(out_grad, x_grad)
 
-- backward_op : reduce_prod_grad
-  forward : reduce_prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : prod_grad
-
 - backward_op : relu6_grad
   forward : relu6 (Tensor x, float threshold) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, float threshold)
@@ -2234,15 +2234,15 @@
   kernel :
     func : triangular_solve_grad
 
-- backward_op : tril_triu_grad
-  forward : tril_triu (Tensor x, int diagonal, bool lower) -> Tensor(out)
+- backward_op : tril_grad
+  forward : tril(Tensor x, int diagonal, bool lower) -> Tensor(out)
   args : (Tensor out_grad, int diagonal, bool lower)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
     param : [out_grad]
   kernel :
-    func : tril_triu_grad
+    func : tril_grad
 
 - backward_op : trilinear_interp_grad
   forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
@@ -2273,14 +2273,14 @@
     func : unfold_grad
   no_need_buffer : x
 
-- backward_op : uniform_random_inplace_grad
-  forward : uniform_random_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
+- backward_op : uniform_inplace_grad
+  forward : uniform_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
   args : (Tensor out_grad, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
   output : Tensor(x_grad)
   infer_meta :
     func : UniformRandomInplaceGradInferMeta
   kernel :
-    func : uniform_random_inplace_grad
+    func : uniform_inplace_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : unsqueeze_double_grad
@@ -2335,14 +2335,14 @@
     func : where_grad
   no_need_buffer : x, y
 
-- backward_op : yolov3_loss_grad
-  forward : yolov3_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
+- backward_op : yolo_loss_grad
+  forward : yolo_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
   args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, Tensor objectness_mask, Tensor gt_match_mask, Tensor loss_grad, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
   output : Tensor(x_grad), Tensor(gt_box_grad), Tensor(gt_label_grad), Tensor(gt_score_grad)
   infer_meta :
-    func : Yolov3LossGradInferMeta
+    func : YoloLossGradInferMeta
   kernel :
-    func : yolov3_loss_grad
+    func : yolo_loss_grad
   optional : gt_score
 
 - backward_op : fold_grad
paddle/phi/api/yaml/legacy_ops.yaml
@@ -1036,17 +1036,6 @@
     func : hard_tanh
   backward : hardtanh_grad
 
-- op : hierarchical_sigmoid
-  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
-  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
-  infer_meta :
-    func : HierarchicalSigmoidInferMeta
-  optional : path, code, bias
-  kernel :
-    func : hierarchical_sigmoid
-    data_type : x
-  backward : hierarchical_sigmoid_grad
-
 - op : histogram
   args : (Tensor input, int64_t bins, int min, int max)
   output : Tensor(out)
@@ -1055,6 +1044,17 @@
   kernel :
     func : histogram
 
+- op : hsigmoid_loss
+  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
+  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
+  infer_meta :
+    func : HSigmoidLossInferMeta
+  optional : path, code, bias
+  kernel :
+    func : hsigmoid_loss
+    data_type : x
+  backward : hsigmoid_loss_grad
+
 - op : huber_loss
   args : (Tensor input, Tensor label, float delta)
   output : Tensor(out), Tensor(residual)
@@ -1696,6 +1696,14 @@
     func : nms
     data_type : x
 
+- op : nonzero
+  args : (Tensor condition)
+  output : Tensor(out)
+  infer_meta :
+    func : NonZeroInferMeta
+  kernel :
+    func : nonzero
+
 - op : norm
   args : (Tensor x, int axis, float epsilon, bool is_test)
   output : Tensor(out), Tensor(norm)
@@ -1828,6 +1836,15 @@
   kernel :
     func : prior_box
 
+- op : prod
+  args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
+  output : Tensor
+  infer_meta :
+    func : ReduceIntArrayAxisInferMetaBase
+  kernel :
+    func : prod_raw
+  backward : prod_grad
+
 - op : psroi_pool
   args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
   output : Tensor
@@ -1893,15 +1910,6 @@
     func : real
   backward : real_grad
 
-- op : reduce_prod
-  args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
-  output : Tensor
-  infer_meta :
-    func : ReduceIntArrayAxisInferMetaBase
-  kernel :
-    func : prod_raw
-  backward : reduce_prod_grad
-
 - op : relu
   args : (Tensor x)
   output : Tensor(out)
@@ -2460,6 +2468,15 @@
     func : triangular_solve
   backward : triangular_solve_grad
 
+- op : tril
+  args : (Tensor x, int diagonal, bool lower)
+  output : Tensor(out)
+  infer_meta :
+    func : TrilInferMeta
+  kernel :
+    func : tril
+  backward : tril_grad
+
 - op : tril_indices
   args : (int rows, int cols, int offset, DataType dtype, Place place={})
   output : Tensor(out)
@@ -2472,15 +2489,6 @@
     data_type : dtype
   backend : place
 
-- op : tril_triu
-  args : (Tensor x, int diagonal, bool lower)
-  output : Tensor(out)
-  infer_meta :
-    func : TrilTriuInferMeta
-  kernel :
-    func : tril_triu
-  backward : tril_triu_grad
-
 - op : trilinear_interp
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
   output : Tensor(output)
@@ -2535,14 +2543,14 @@
     func : unfold
   backward : unfold_grad
 
-- op : uniform_random
+- op : uniform
   args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={})
   output : Tensor(out)
   infer_meta :
     func : UniformRandomInferMeta
     param : [shape, dtype]
   kernel :
-    func : uniform_random
+    func : uniform
     param : [shape, dtype, min, max, seed]
     data_type : dtype
   backend : place
@@ -2628,14 +2636,6 @@
     func : where
   backward : where_grad
 
-- op : where_index
-  args : (Tensor condition)
-  output : Tensor(out)
-  infer_meta :
-    func : WhereIndexInferMeta
-  kernel :
-    func : where_index
-
 - op : yolo_box
   args : (Tensor x, Tensor img_size, int[] anchors, int class_num, float conf_thresh, int downsample_ratio, bool clip_bbox, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5)
   output : Tensor(boxes), Tensor(scores)
@@ -2645,16 +2645,16 @@
     func : yolo_box
     data_type : x
 
-- op : yolov3_loss
+- op : yolo_loss
   args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
   output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
   infer_meta :
-    func : Yolov3LossInferMeta
+    func : YoloLossInferMeta
   kernel :
-    func : yolov3_loss
+    func : yolo_loss
     data_type : x
   optional : gt_score
-  backward : yolov3_loss_grad
+  backward : yolo_loss_grad
 
 - op : zeros
   args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
@@ -2734,16 +2734,16 @@
   intermediate : reserve
   view : (dropout_state_in -> dropout_state_out)
 
-- op : uniform_random_inplace
+- op : uniform_inplace
   args : (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
   output : Tensor(out)
   infer_meta :
     func : UniformRandomInplaceInferMeta
   kernel :
-    func : uniform_random_inplace
+    func : uniform_inplace
     data_type : x
   inplace : (x -> out)
-  backward : uniform_random_inplace_grad
+  backward : uniform_inplace_grad
 
 - op : unpool
   args : (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
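These two YAML files feed Paddle's API code generator, so renaming an entry renames the generated C++ (and dynamic-mode) API function. A hedged sketch of what the generator would emit for some of the renamed entries, with signatures inferred from the args/output fields above rather than copied from generated sources:

// From paddle/phi/api/include/api.h (generated; illustrative form only).
PADDLE_API Tensor tril(const Tensor& x, int diagonal, bool lower);
PADDLE_API Tensor nonzero(const Tensor& condition);
PADDLE_API Tensor uniform(const IntArray& shape, DataType dtype,
                          const Scalar& min, const Scalar& max, int seed,
                          const Place& place = {});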
paddle/phi/infermeta/backward.cc
@@ -987,24 +987,24 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
   x_grad->set_dtype(out_grad[0]->dtype());
 }
 
-void Yolov3LossGradInferMeta(const MetaTensor& x,
-                             const MetaTensor& gt_box,
-                             const MetaTensor& gt_label,
-                             const MetaTensor& gt_score,
-                             const MetaTensor& objectness_mask,
-                             const MetaTensor& gt_match_mask,
-                             const MetaTensor& loss_grad,
-                             const std::vector<int>& anchors,
-                             const std::vector<int>& anchor_mask,
-                             int class_num,
-                             float ignore_thresh,
-                             int downsample_ratio,
-                             bool use_label_smooth,
-                             float scale_x_y,
-                             MetaTensor* x_grad,
-                             MetaTensor* gt_box_grad,
-                             MetaTensor* gt_label_grad,
-                             MetaTensor* gt_score_grad) {
+void YoloLossGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& gt_box,
+                           const MetaTensor& gt_label,
+                           const MetaTensor& gt_score,
+                           const MetaTensor& objectness_mask,
+                           const MetaTensor& gt_match_mask,
+                           const MetaTensor& loss_grad,
+                           const std::vector<int>& anchors,
+                           const std::vector<int>& anchor_mask,
+                           int class_num,
+                           float ignore_thresh,
+                           int downsample_ratio,
+                           bool use_label_smooth,
+                           float scale_x_y,
+                           MetaTensor* x_grad,
+                           MetaTensor* gt_box_grad,
+                           MetaTensor* gt_label_grad,
+                           MetaTensor* gt_score_grad) {
   if (x_grad) {
     x_grad->set_dims(x.dims());
     x_grad->set_dtype(x.dtype());
paddle/phi/infermeta/backward.h
@@ -385,24 +385,24 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
                           int axis,
                           MetaTensor* x_grad);
 
-void Yolov3LossGradInferMeta(const MetaTensor& x,
-                             const MetaTensor& gt_box,
-                             const MetaTensor& gt_label,
-                             const MetaTensor& gt_score,
-                             const MetaTensor& objectness_mask,
-                             const MetaTensor& gt_match_mask,
-                             const MetaTensor& loss_grad,
-                             const std::vector<int>& anchors,
-                             const std::vector<int>& anchor_mask,
-                             int class_num,
-                             float ignore_thresh,
-                             int downsample_ratio,
-                             bool use_label_smooth,
-                             float scale_x_y,
-                             MetaTensor* x_grad,
-                             MetaTensor* gt_box_grad,
-                             MetaTensor* gt_label_grad,
-                             MetaTensor* gt_score_grad);
+void YoloLossGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& gt_box,
+                           const MetaTensor& gt_label,
+                           const MetaTensor& gt_score,
+                           const MetaTensor& objectness_mask,
+                           const MetaTensor& gt_match_mask,
+                           const MetaTensor& loss_grad,
+                           const std::vector<int>& anchors,
+                           const std::vector<int>& anchor_mask,
+                           int class_num,
+                           float ignore_thresh,
+                           int downsample_ratio,
+                           bool use_label_smooth,
+                           float scale_x_y,
+                           MetaTensor* x_grad,
+                           MetaTensor* gt_box_grad,
+                           MetaTensor* gt_label_grad,
+                           MetaTensor* gt_score_grad);
 
 void IndexAddGradInferMeta(const MetaTensor& index,
                            const MetaTensor& add_value,
paddle/phi/infermeta/multiary.cc
@@ -1328,22 +1328,22 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
   out_count->set_dtype(DataType::INT32);
 }
 
-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
-                                  const MetaTensor& w,
-                                  const MetaTensor& label,
-                                  const MetaTensor& path,
-                                  const MetaTensor& code,
-                                  const MetaTensor& bias,
-                                  int num_classes,
-                                  bool remote_prefetch,
-                                  int trainer_id,
-                                  const std::vector<int64_t>& height_sections,
-                                  const std::vector<std::string>& epmap,
-                                  const std::vector<std::string>& table_names,
-                                  bool is_sparse,
-                                  MetaTensor* out,
-                                  MetaTensor* pre_out,
-                                  MetaTensor* w_out) {
+void HSigmoidLossInferMeta(const MetaTensor& x,
+                           const MetaTensor& w,
+                           const MetaTensor& label,
+                           const MetaTensor& path,
+                           const MetaTensor& code,
+                           const MetaTensor& bias,
+                           int num_classes,
+                           bool remote_prefetch,
+                           int trainer_id,
+                           const std::vector<int64_t>& height_sections,
+                           const std::vector<std::string>& epmap,
+                           const std::vector<std::string>& table_names,
+                           bool is_sparse,
+                           MetaTensor* out,
+                           MetaTensor* pre_out,
+                           MetaTensor* w_out) {
   const int64_t input_dims = x.dims()[0];
   const int64_t label_dims = label.dims()[0];
   PADDLE_ENFORCE_EQ(input_dims,
@@ -2762,20 +2762,20 @@ void WhereInferMeta(const MetaTensor& condition,
   out->share_meta(x);
 }
 
-void Yolov3LossInferMeta(const MetaTensor& x,
-                         const MetaTensor& gt_box,
-                         const MetaTensor& gt_label,
-                         const MetaTensor& gt_score,
-                         const std::vector<int>& anchors,
-                         const std::vector<int>& anchor_mask,
-                         int class_num,
-                         float ignore_thresh,
-                         int downsample_ratio,
-                         bool use_label_smooth,
-                         float scale_x_y,
-                         MetaTensor* loss,
-                         MetaTensor* objectness_mask,
-                         MetaTensor* gt_match_mask) {
+void YoloLossInferMeta(const MetaTensor& x,
+                       const MetaTensor& gt_box,
+                       const MetaTensor& gt_label,
+                       const MetaTensor& gt_score,
+                       const std::vector<int>& anchors,
+                       const std::vector<int>& anchor_mask,
+                       int class_num,
+                       float ignore_thresh,
+                       int downsample_ratio,
+                       bool use_label_smooth,
+                       float scale_x_y,
+                       MetaTensor* loss,
+                       MetaTensor* objectness_mask,
+                       MetaTensor* gt_match_mask) {
   auto dim_x = x.dims();
   auto dim_gtbox = gt_box.dims();
   auto dim_gtlabel = gt_label.dims();
paddle/phi/infermeta/multiary.h
@@ -288,22 +288,22 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
                                    MetaTensor* out_count,
                                    MetaTensor* out_eids);
 
-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
-                                  const MetaTensor& w,
-                                  const MetaTensor& label,
-                                  const MetaTensor& path,
-                                  const MetaTensor& code,
-                                  const MetaTensor& bias,
-                                  int num_classes,
-                                  bool remote_prefetch,
-                                  int trainer_id,
-                                  const std::vector<int64_t>& height_sections,
-                                  const std::vector<std::string>& epmap,
-                                  const std::vector<std::string>& table_names,
-                                  bool is_sparse,
-                                  MetaTensor* out,
-                                  MetaTensor* pre_out,
-                                  MetaTensor* w_out);
+void HSigmoidLossInferMeta(const MetaTensor& x,
+                           const MetaTensor& w,
+                           const MetaTensor& label,
+                           const MetaTensor& path,
+                           const MetaTensor& code,
+                           const MetaTensor& bias,
+                           int num_classes,
+                           bool remote_prefetch,
+                           int trainer_id,
+                           const std::vector<int64_t>& height_sections,
+                           const std::vector<std::string>& epmap,
+                           const std::vector<std::string>& table_names,
+                           bool is_sparse,
+                           MetaTensor* out,
+                           MetaTensor* pre_out,
+                           MetaTensor* w_out);
 
 void InterpolateInferMeta(
     const MetaTensor& x,
@@ -508,19 +508,19 @@ void WhereInferMeta(const MetaTensor& condition,
                     const MetaTensor& y,
                     MetaTensor* out);
 
-void Yolov3LossInferMeta(const MetaTensor& x,
-                         const MetaTensor& gt_box,
-                         const MetaTensor& gt_label,
-                         const MetaTensor& gt_score,
-                         const std::vector<int>& anchors,
-                         const std::vector<int>& anchor_mask,
-                         int class_num,
-                         float ignore_thresh,
-                         int downsample_ratio,
-                         bool use_label_smooth,
-                         float scale_x_y,
-                         MetaTensor* loss,
-                         MetaTensor* objectness_mask,
-                         MetaTensor* gt_match_mask);
+void YoloLossInferMeta(const MetaTensor& x,
+                       const MetaTensor& gt_box,
+                       const MetaTensor& gt_label,
+                       const MetaTensor& gt_score,
+                       const std::vector<int>& anchors,
+                       const std::vector<int>& anchor_mask,
+                       int class_num,
+                       float ignore_thresh,
+                       int downsample_ratio,
+                       bool use_label_smooth,
+                       float scale_x_y,
+                       MetaTensor* loss,
+                       MetaTensor* objectness_mask,
+                       MetaTensor* gt_match_mask);
 
 }  // namespace phi
paddle/phi/infermeta/ternary.cc
@@ -402,64 +402,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
   }
 }
 
-void SendURecvInferMeta(const MetaTensor& x,
-                        const MetaTensor& src_index,
-                        const MetaTensor& dst_index,
-                        const std::string& reduce_op,
-                        const IntArray& out_size,
-                        MetaTensor* out,
-                        MetaTensor* dst_count) {
-  auto src_index_dims = src_index.dims();
-  if (src_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(src_index_dims[1],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Src_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          src_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(
-        src_index_dims.size(),
-        1,
-        phi::errors::InvalidArgument(
-            "The Src_index should be 1D, when it is not 2D, but we get %d",
-            src_index_dims.size()));
-  }
-
-  auto dst_index_dims = dst_index.dims();
-  if (dst_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(dst_index_dims[1],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Dst_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          dst_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(dst_index_dims.size(),
-                      1,
-                      phi::errors::InvalidArgument(
-                          "The Dst_index should be 1D, "
-                          "when it is not 2D, but we get %d",
-                          dst_index_dims.size()));
-  }
-
-  PADDLE_ENFORCE_EQ(src_index_dims[0],
-                    dst_index_dims[0],
-                    phi::errors::InvalidArgument(
-                        "Src_index and Dst_index should have the same shape."));
-
-  auto dims = x.dims();
-  std::vector<int64_t> dims_ = phi::vectorize(dims);
-  dims_[0] = -1;
-  out->set_dims(phi::make_ddim(dims_));
-  out->set_dtype(x.dtype());
-
-  if (reduce_op == "MEAN") {
-    dst_count->set_dims({-1});
-    dst_count->set_dtype(DataType::INT32);
-  }
-}
-
 void GroupNormInferMeta(const MetaTensor& x,
                         const MetaTensor& scale,
                         const MetaTensor& bias,
@@ -1164,6 +1106,64 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
   out->set_dtype(x.dtype());
 }
 
+void SendURecvInferMeta(const MetaTensor& x,
+                        const MetaTensor& src_index,
+                        const MetaTensor& dst_index,
+                        const std::string& reduce_op,
+                        const IntArray& out_size,
+                        MetaTensor* out,
+                        MetaTensor* dst_count) {
+  auto src_index_dims = src_index.dims();
+  if (src_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(src_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Src_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          src_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        src_index_dims.size(),
+        1,
+        phi::errors::InvalidArgument(
+            "The Src_index should be 1D, when it is not 2D, but we get %d",
+            src_index_dims.size()));
+  }
+
+  auto dst_index_dims = dst_index.dims();
+  if (dst_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(dst_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Dst_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          dst_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(dst_index_dims.size(),
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The Dst_index should be 1D, "
+                          "when it is not 2D, but we get %d",
+                          dst_index_dims.size()));
+  }
+
+  PADDLE_ENFORCE_EQ(src_index_dims[0],
+                    dst_index_dims[0],
+                    phi::errors::InvalidArgument(
+                        "Src_index and Dst_index should have the same shape."));
+
+  auto dims = x.dims();
+  std::vector<int64_t> dims_ = phi::vectorize(dims);
+  dims_[0] = -1;
+  out->set_dims(phi::make_ddim(dims_));
+  out->set_dtype(x.dtype());
+
+  if (reduce_op == "MEAN") {
+    dst_count->set_dims({-1});
+    dst_count->set_dtype(DataType::INT32);
+  }
+}
+
 void SpectralNormInferMeta(const MetaTensor& weight,
                            const MetaTensor& u,
                            const MetaTensor& v,
paddle/phi/infermeta/ternary.h
@@ -72,14 +72,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
                            MetaTensor* saved_variance,
                            MetaConfig config = MetaConfig());
 
-void SendURecvInferMeta(const MetaTensor& x,
-                        const MetaTensor& src_index,
-                        const MetaTensor& dst_index,
-                        const std::string& reduce_op,
-                        const IntArray& out_size,
-                        MetaTensor* out,
-                        MetaTensor* dst_count);
-
 void GroupNormInferMeta(const MetaTensor& x,
                         const MetaTensor& scale,
                         const MetaTensor& bias,
@@ -186,6 +178,14 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& updates,
                            MetaTensor* out);
 
+void SendURecvInferMeta(const MetaTensor& x,
+                        const MetaTensor& src_index,
+                        const MetaTensor& dst_index,
+                        const std::string& reduce_op,
+                        const IntArray& out_size,
+                        MetaTensor* out,
+                        MetaTensor* dst_count);
+
 void SpectralNormInferMeta(const MetaTensor& weight,
                            const MetaTensor& u,
                            const MetaTensor& v,
paddle/phi/infermeta/unary.cc
(diff collapsed on the original page; not reproduced here)
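The collapsed unary.cc diff contains the definition side of the TrilTriuInferMeta → TrilInferMeta rename declared in unary.h below. As a reminder of what an InferMeta function does, here is a hedged sketch of a body of that shape (illustrative, not copied from the collapsed diff): the function only propagates static metadata; the kernel computes values later.

namespace phi {

// Sketch: tril/triu keep the input's shape and dtype, so the output
// metadata mirrors the input. The real definition also validates that
// x has rank >= 2.
void TrilInferMeta(const MetaTensor& x,
                   int diagonal,
                   bool lower,
                   MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
}

}  // namespace phi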
paddle/phi/infermeta/unary.h
@@ -65,6 +65,11 @@ void BatchSizeLikeInferMeta(const MetaTensor& x,
 void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);
 
+void ChannelShuffleInferMeta(const MetaTensor& x,
+                             int groups,
+                             const std::string& data_format,
+                             MetaTensor* out);
+
 void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);
 
 void ClassCenterSampleInferMeta(const MetaTensor& label,
@@ -191,6 +196,14 @@ void FlipInferMeta(const MetaTensor& x,
                    const std::vector<int>& axis,
                    MetaTensor* out);
 
+void FoldInferMeta(const MetaTensor& x,
+                   const std::vector<int>& output_sizes,
+                   const std::vector<int>& kernel_sizes,
+                   const std::vector<int>& strides,
+                   const std::vector<int>& paddings,
+                   const std::vector<int>& dilations,
+                   MetaTensor* out);
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
@@ -214,6 +227,8 @@ void GumbelSoftmaxInferMeta(const MetaTensor& x,
 void HistogramInferMeta(
     const MetaTensor& input, int64_t bins, int min, int max, MetaTensor* out);
 
+void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
+
 void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);
 
 void InferMetaFromVecValue(const MetaTensor& x,
@@ -288,6 +303,8 @@ void NanmedianInferMeta(const MetaTensor& x,
                         MetaTensor* out,
                         MetaTensor* median_index);
 
+void NonZeroInferMeta(const MetaTensor& condition, MetaTensor* out);
+
 void NMSInferMeta(const MetaTensor& x, float threshold, MetaTensor* out);
 
 void NormInferMeta(const MetaTensor& x,
@@ -297,6 +314,14 @@ void NormInferMeta(const MetaTensor& x,
                    MetaTensor* out,
                    MetaTensor* norm);
 
+void OneHotRawInferMeta(const MetaTensor& x,
+                        const Scalar& depth,
+                        DataType dtype,
+                        bool allow_out_of_range,
+                        MetaTensor* out);
+
+void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
+
 void OverlapAddInferMeta(const MetaTensor& x,
                          int hop_length,
                          int axis,
@@ -576,10 +601,10 @@ void TransposeGradInferMeta(const MetaTensor& x,
                             const std::vector<int>& axis,
                             MetaTensor* out);
 
-void TrilTriuInferMeta(const MetaTensor& x,
-                       int diagonal,
-                       bool lower,
-                       MetaTensor* out);
+void TrilInferMeta(const MetaTensor& x,
+                   int diagonal,
+                   bool lower,
+                   MetaTensor* out);
 
 void UnbindInferMeta(const MetaTensor& x,
                      int axis,
@@ -657,29 +682,4 @@ void UnStackInferMeta(const MetaTensor& x,
                       int num,
                       std::vector<MetaTensor*> outs);
 
-void OneHotRawInferMeta(const MetaTensor& x,
-                        const Scalar& depth,
-                        DataType dtype,
-                        bool allow_out_of_range,
-                        MetaTensor* out);
-
-void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
-
-void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);
-
-void ChannelShuffleInferMeta(const MetaTensor& x,
-                             int groups,
-                             const std::string& data_format,
-                             MetaTensor* out);
-
-void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
-
-void FoldInferMeta(const MetaTensor& x,
-                   const std::vector<int>& output_sizes,
-                   const std::vector<int>& kernel_sizes,
-                   const std::vector<int>& strides,
-                   const std::vector<int>& paddings,
-                   const std::vector<int>& dilations,
-                   MetaTensor* out);
-
 }  // namespace phi
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc (deleted, 100644 → 0)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& w,
                                   const DenseTensor& label,
                                   const paddle::optional<DenseTensor>& path,
                                   const paddle::optional<DenseTensor>& code,
                                   const paddle::optional<DenseTensor>& bias,
                                   const DenseTensor& pre_out,
                                   const DenseTensor& out_grad,
                                   int num_classes,
                                   bool remote_prefetch,
                                   int trainer_id,
                                   const std::vector<int64_t>& height_sections,
                                   const std::vector<std::string>& epmap,
                                   const std::vector<std::string>& table_names,
                                   bool is_sparse,
                                   DenseTensor* x_grad,
                                   DenseTensor* w_grad,
                                   DenseTensor* bias_grad) {
  HierarchicalSigmoidGradKernelImpl<T>(ctx,
                                       x,
                                       w,
                                       label,
                                       path,
                                       code,
                                       bias,
                                       pre_out,
                                       out_grad,
                                       num_classes,
                                       remote_prefetch,
                                       trainer_id,
                                       height_sections,
                                       epmap,
                                       table_names,
                                       is_sparse,
                                       x_grad,
                                       w_grad,
                                       bias_grad);
}

}  // namespace phi

PD_REGISTER_KERNEL(hierarchical_sigmoid_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::HierarchicalSigmoidGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h → paddle/phi/kernels/cpu/hsigmoid_loss_grad.h
@@ -26,27 +26,26 @@ namespace phi {
 namespace math = paddle::operators::math;
 
 template <typename T, typename Context>
-void HierarchicalSigmoidGradKernelImpl(
-    const Context& ctx,
-    const DenseTensor& x,
-    const DenseTensor& w,
-    const DenseTensor& label,
-    const paddle::optional<DenseTensor>& path,
-    const paddle::optional<DenseTensor>& code,
-    const paddle::optional<DenseTensor>& bias,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
-    int num_classes,
-    bool remote_prefetch,
-    int trainer_id,
-    const std::vector<int64_t>& height_sections,
-    const std::vector<std::string>& epmap,
-    const std::vector<std::string>& table_names,
-    bool is_sparse,
-    DenseTensor* x_grad,
-    DenseTensor* w_grad,
-    DenseTensor* bias_grad,
-    SelectedRows* w_grad_sr = nullptr) {
+void HSigmoidLossGradKernelImpl(const Context& ctx,
+                                const DenseTensor& x,
+                                const DenseTensor& w,
+                                const DenseTensor& label,
+                                const paddle::optional<DenseTensor>& path,
+                                const paddle::optional<DenseTensor>& code,
+                                const paddle::optional<DenseTensor>& bias,
+                                const DenseTensor& pre_out,
+                                const DenseTensor& out_grad,
+                                int num_classes,
+                                bool remote_prefetch,
+                                int trainer_id,
+                                const std::vector<int64_t>& height_sections,
+                                const std::vector<std::string>& epmap,
+                                const std::vector<std::string>& table_names,
+                                bool is_sparse,
+                                DenseTensor* x_grad,
+                                DenseTensor* w_grad,
+                                DenseTensor* bias_grad,
+                                SelectedRows* w_grad_sr = nullptr) {
   funcs::SetConstant<Context, T> zero;
   DenseTensor pre_out_grad;
paddle/phi/kernels/cpu/hsigmoid_loss_grad_kernel.cc (new file, 0 → 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/hsigmoid_loss_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"

namespace phi {

template <typename T, typename Context>
void HSigmoidLossGradKernel(const Context& ctx,
                            const DenseTensor& x,
                            const DenseTensor& w,
                            const DenseTensor& label,
                            const paddle::optional<DenseTensor>& path,
                            const paddle::optional<DenseTensor>& code,
                            const paddle::optional<DenseTensor>& bias,
                            const DenseTensor& pre_out,
                            const DenseTensor& out_grad,
                            int num_classes,
                            bool remote_prefetch,
                            int trainer_id,
                            const std::vector<int64_t>& height_sections,
                            const std::vector<std::string>& epmap,
                            const std::vector<std::string>& table_names,
                            bool is_sparse,
                            DenseTensor* x_grad,
                            DenseTensor* w_grad,
                            DenseTensor* bias_grad) {
  HSigmoidLossGradKernelImpl<T>(ctx,
                                x,
                                w,
                                label,
                                path,
                                code,
                                bias,
                                pre_out,
                                out_grad,
                                num_classes,
                                remote_prefetch,
                                trainer_id,
                                height_sections,
                                epmap,
                                table_names,
                                is_sparse,
                                x_grad,
                                w_grad,
                                bias_grad);
}

}  // namespace phi

PD_REGISTER_KERNEL(hsigmoid_loss_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::HSigmoidLossGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc → paddle/phi/kernels/cpu/hsigmoid_loss_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/hierarchical_sigmoid_kernel.h"
+#include "paddle/phi/kernels/hsigmoid_loss_kernel.h"
 
 #include "paddle/fluid/operators/math/matrix_bit_code.h"
 #include "paddle/fluid/platform/transform.h"
@@ -28,23 +28,23 @@ namespace phi {
 namespace math = paddle::operators::math;
 
 template <typename T, typename Context>
-void HierarchicalSigmoidKernel(const Context& ctx,
-                               const DenseTensor& x,
-                               const DenseTensor& w,
-                               const DenseTensor& label,
-                               const paddle::optional<DenseTensor>& path,
-                               const paddle::optional<DenseTensor>& code,
-                               const paddle::optional<DenseTensor>& bias,
-                               int num_classes,
-                               bool remote_prefetch,
-                               int trainer_id,
-                               const std::vector<int64_t>& height_sections,
-                               const std::vector<std::string>& epmap,
-                               const std::vector<std::string>& table_names,
-                               bool is_sparse,
-                               DenseTensor* out,
-                               DenseTensor* pre_out,
-                               DenseTensor* w_out) {
+void HSigmoidLossKernel(const Context& ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& w,
+                        const DenseTensor& label,
+                        const paddle::optional<DenseTensor>& path,
+                        const paddle::optional<DenseTensor>& code,
+                        const paddle::optional<DenseTensor>& bias,
+                        int num_classes,
+                        bool remote_prefetch,
+                        int trainer_id,
+                        const std::vector<int64_t>& height_sections,
+                        const std::vector<std::string>& epmap,
+                        const std::vector<std::string>& table_names,
+                        bool is_sparse,
+                        DenseTensor* out,
+                        DenseTensor* pre_out,
+                        DenseTensor* w_out) {
   size_t num_classes_st = static_cast<size_t>(num_classes);
   // for remote prefetch
@@ -106,9 +106,5 @@ void HierarchicalSigmoidKernel(const Context& ctx,
 }  // namespace phi
 
-PD_REGISTER_KERNEL(hierarchical_sigmoid,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::HierarchicalSigmoidKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    hsigmoid_loss, CPU, ALL_LAYOUT, phi::HSigmoidLossKernel, float, double) {}
paddle/phi/kernels/cpu/where_index_kernel.cc → paddle/phi/kernels/cpu/nonzero_kernel.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
...
@@ -47,9 +47,9 @@ struct WhereIndexFunctor {
};

template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                    const DenseTensor& condition,
                    DenseTensor* out) {
  const T* cond_data = condition.data<T>();
  auto numel = condition.numel();
  auto dims = condition.dims();
...
@@ -83,10 +83,10 @@ void WhereIndexKernel(const Context& dev_ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(where_index,
+PD_REGISTER_KERNEL(nonzero,
                   CPU,
                   ALL_LAYOUT,
-                   phi::WhereIndexKernel,
+                   phi::NonZeroKernel,
                   int64_t,
                   int,
                   int16_t,
...
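Since only the name changes here, a compact sketch of what NonZeroKernel computes may help: walk the flattened input and, for every non-zero element, decode its flat offset into a row-major coordinate. This is a standalone illustration, not the phi implementation (which additionally sizes its int64 output up front).

// Sketch only: collect the multi-dimensional index of every non-zero element.
#include <cstdint>
#include <vector>

std::vector<std::vector<int64_t>> NonZeroSketch(
    const std::vector<int>& data, const std::vector<int64_t>& dims) {
  std::vector<std::vector<int64_t>> out;
  for (int64_t flat = 0; flat < static_cast<int64_t>(data.size()); ++flat) {
    if (data[flat] == 0) continue;
    // Decode the flat offset into one coordinate per dimension (row-major).
    std::vector<int64_t> coord(dims.size());
    int64_t rest = flat;
    for (int64_t d = static_cast<int64_t>(dims.size()) - 1; d >= 0; --d) {
      coord[d] = rest % dims[d];
      rest /= dims[d];
    }
    out.push_back(coord);
  }
  return out;  // shape [num_nonzero, rank], like the kernel's int64 output
}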
paddle/phi/kernels/cpu/reduce_prod_grad_kernel.cc → paddle/phi/kernels/cpu/prod_grad_kernel.cc
@@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"

PD_REGISTER_KERNEL(prod_grad,
                   CPU,
                   ALL_LAYOUT,
-                   phi::ReduceProdGradKernel,
+                   phi::ProdGradKernel,
                   float,
                   double,
                   int,
...
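The kernel body lives in the impl header included above, but the underlying math is short enough to state inline: for a full reduction out = x₀·x₁·…·xₙ₋₁, the partial derivative is ∂out/∂xᵢ = out / xᵢ, so the backward pass scales out_grad by out / x elementwise. A standalone sketch, ignoring the zero-input corner case that a production kernel must handle:

// Sketch only: gradient of a full product reduction; assumes x[i] != 0.
#include <vector>

std::vector<double> ProdGradSketch(const std::vector<double>& x,
                                   double out, double out_grad) {
  std::vector<double> x_grad(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    x_grad[i] = out_grad * out / x[i];  // d out / d x[i] = out / x[i]
  }
  return x_grad;
}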
paddle/phi/kernels/cpu/reduce_prod_kernel.cc → paddle/phi/kernels/cpu/prod_kernel.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
...
paddle/phi/kernels/cpu/tril_triu_kernel.cc → paddle/phi/kernels/cpu/tril_grad_kernel.cc
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu,
+PD_REGISTER_KERNEL(tril_grad,
                   CPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuKernel,
+                   phi::TrilGradKernel,
                   bool,
                   float,
                   double,
...
paddle/phi/kernels/cpu/tril_triu_grad_kernel.cc → paddle/phi/kernels/cpu/tril_kernel.cc
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu_grad,
+PD_REGISTER_KERNEL(tril,
                   CPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuGradKernel,
+                   phi::TrilKernel,
                   bool,
                   float,
                   double,
...
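Both registrations funnel into funcs::TrilTriuCompute; its elementwise rule is simple enough to sketch standalone: an entry (i, j) survives when j − i is on the kept side of the requested diagonal.

// Sketch only: the tril/triu masking rule.  lower=true keeps entries on or
// below `diagonal` (tril); lower=false keeps entries on or above it (triu).
#include <vector>

void TrilSketch(const std::vector<float>& x, int rows, int cols,
                int diagonal, bool lower, std::vector<float>* out) {
  out->assign(x.size(), 0.0f);
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      const bool keep = lower ? (j - i <= diagonal) : (j - i >= diagonal);
      if (keep) (*out)[i * cols + j] = x[i * cols + j];
    }
  }
}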
paddle/phi/kernels/cpu/uniform_random_inplace_grad_kernel.cc → paddle/phi/kernels/cpu/uniform_inplace_grad_kernel.cc
@@ -12,22 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"

#include "paddle/phi/core/kernel_registry.h"

namespace phi {

template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
+void UniformInplaceGradKernel(const Context& ctx,
                              const DenseTensor& out_grad,
                              float min,
                              float max,
                              int seed,
                              int diag_num,
                              int diag_step,
                              float diag_val,
                              DenseTensor* x_grad) {
  if (x_grad) {
    auto* data = ctx.template Alloc<T>(x_grad);
    std::fill(data, data + x_grad->numel(), T(0));
...
@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace_grad,
+PD_REGISTER_KERNEL(uniform_inplace_grad,
                   CPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomInplaceGradKernel,
+                   phi::UniformInplaceGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/cpu/uniform_random_inplace_kernel.cc → paddle/phi/kernels/cpu/uniform_inplace_kernel.cc
@@ -12,22 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_kernel.h"

#include "paddle/phi/core/kernel_registry.h"

namespace phi {

template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
+void UniformInplaceKernel(const Context& ctx,
                          const DenseTensor& x,
                          float min,
                          float max,
                          int seed,
                          int diag_num,
                          int diag_step,
                          float diag_val,
                          DenseTensor* out) {
  T* data = ctx.template Alloc<T>(out);
  int64_t size = out->numel();
  std::uniform_real_distribution<T> dist(static_cast<T>(min),
...
@@ -46,9 +46,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace,
+PD_REGISTER_KERNEL(uniform_inplace,
                   CPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomInplaceKernel,
+                   phi::UniformInplaceKernel,
                   float,
                   double) {}
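The CPU fill visible above uses std::uniform_real_distribution; a self-contained sketch of the same idea follows. The seeded fallback for seed == 0 is an assumption of this sketch — the real kernel defers to phi's global generator in that case.

// Sketch only: fill a buffer i.i.d. from U[min, max) with <random>.
#include <random>
#include <vector>

void UniformFillSketch(std::vector<float>* out, float min, float max,
                       unsigned seed) {
  std::mt19937_64 engine(seed != 0 ? seed : std::random_device{}());
  std::uniform_real_distribution<float> dist(min, max);
  for (float& v : *out) v = dist(engine);
}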
paddle/phi/kernels/cpu/uniform_random_kernel.cc → paddle/phi/kernels/cpu/uniform_kernel.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/uniform_real_distribution.h"
...
@@ -20,16 +20,16 @@
namespace phi {

template <typename T, typename Context>
-void UniformRandomRawKernel(const Context &dev_ctx,
+void UniformRawKernel(const Context &dev_ctx,
                      const IntArray &shape,
                      DataType dtype,
                      const Scalar &min,
                      const Scalar &max,
                      int seed,
                      int diag_num,
                      int diag_step,
                      float diag_val,
                      DenseTensor *out) {
  out->Resize(phi::make_ddim(shape.GetData()));
  T *data = dev_ctx.template Alloc<T>(out);
  auto size = out->numel();
...
@@ -63,10 +63,10 @@ void UniformRandomRawKernel(const Context &dev_ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw,
+PD_REGISTER_KERNEL(uniform_raw,
                   CPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomRawKernel,
+                   phi::UniformRawKernel,
                   float,
                   double,
                   phi::dtype::bfloat16) {}
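The "raw" variant's extra diag_num/diag_step/diag_val arguments overwrite a strided diagonal after the fill. The position arithmetic below (pos = i * diag_step + i) follows my reading of the historical uniform_random op and should be treated as an assumption, not a quote of this kernel.

// Sketch only: apply the diagonal override after a uniform fill.
#include <cstddef>
#include <vector>

void ApplyDiagSketch(std::vector<float>* data, int diag_num, int diag_step,
                     float diag_val) {
  for (int i = 0; i < diag_num; ++i) {
    // Assumed stride rule: each step advances diag_step elements plus one.
    const std::size_t pos = static_cast<std::size_t>(i) * diag_step + i;
    if (pos < data->size()) (*data)[pos] = diag_val;  // bounds guard added
  }
}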
paddle/phi/kernels/cpu/yolov3_loss_functor.h → paddle/phi/kernels/cpu/yolo_loss_functor.h
File moved
paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc → paddle/phi/kernels/cpu/yolo_loss_grad_kernel.cc
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/yolov3_loss_grad_kernel.h"
+#include "paddle/phi/kernels/yolo_loss_grad_kernel.h"

#include <algorithm>
#include <vector>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h"
+#include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {
...
@@ -117,25 +117,25 @@ static inline void CalcObjnessLossGrad(T* input_grad,
}

template <typename T, typename Context>
-void Yolov3LossGradKernel(const Context& dev_ctx,
+void YoloLossGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& gt_box,
                        const DenseTensor& gt_label,
                        const paddle::optional<DenseTensor>& gt_score,
                        const DenseTensor& objectness_mask,
                        const DenseTensor& gt_match_mask,
                        const DenseTensor& loss_grad,
                        const std::vector<int>& anchors,
                        const std::vector<int>& anchor_mask,
                        int class_num,
                        float ignore_thresh,
                        int downsample_ratio,
                        bool use_label_smooth,
                        float scale_x_y,
                        DenseTensor* x_grad,
                        DenseTensor* gt_box_grad,
                        DenseTensor* gt_label_grad,
                        DenseTensor* gt_score_grad) {
  auto* input = &x;
  auto input_grad = x_grad;
  auto* objness_mask = &objectness_mask;
...
@@ -237,9 +237,5 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(yolov3_loss_grad,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::Yolov3LossGradKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    yolo_loss_grad, CPU, ALL_LAYOUT, phi::YoloLossGradKernel, float, double) {}
paddle/phi/kernels/cpu/yolov3_loss_kernel.cc → paddle/phi/kernels/cpu/yolo_loss_kernel.cc
@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/yolov3_loss_kernel.h"
+#include "paddle/phi/kernels/yolo_loss_kernel.h"

#include <algorithm>
#include <vector>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h"
+#include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {
...
@@ -178,21 +178,21 @@ static void inline GtValid(bool* valid,
}

template <typename T, typename Context>
-void Yolov3LossKernel(const Context& dev_ctx,
+void YoloLossKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& gt_box,
                    const DenseTensor& gt_label,
                    const paddle::optional<DenseTensor>& gt_score,
                    const std::vector<int>& anchors,
                    const std::vector<int>& anchor_mask,
                    int class_num,
                    float ignore_thresh,
                    int downsample_ratio,
                    bool use_label_smooth,
                    float scale_x_y,
                    DenseTensor* loss,
                    DenseTensor* objectness_mask,
                    DenseTensor* gt_match_mask) {
  auto* input = &x;
  auto objness_mask = objectness_mask;
  float scale = scale_x_y;
...
@@ -371,4 +371,4 @@ void Yolov3LossKernel(const Context& dev_ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(
-    yolov3_loss, CPU, ALL_LAYOUT, phi::Yolov3LossKernel, float, double) {}
+PD_REGISTER_KERNEL(
+    yolo_loss, CPU, ALL_LAYOUT, phi::YoloLossKernel, float, double) {}
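YoloLossKernel combines several terms (box regression, objectness, classification). As one self-contained ingredient, here is sigmoid cross-entropy on a single logit with optional label smoothing; the exact smoothing weight the kernel uses is not visible in this diff, so the mapping below is an illustrative assumption.

// Sketch only: per-logit sigmoid cross-entropy with assumed label smoothing.
#include <cmath>

double SigmoidCrossEntropySketch(double logit, double label,
                                 bool use_label_smooth, int class_num) {
  if (use_label_smooth && class_num > 1) {
    // Pull hard 0/1 targets away from the extremes; maps 0 -> delta and
    // 1 -> 1 - delta.  The precise weight in the kernel is an assumption.
    const double delta = 1.0 / class_num;
    label = label * (1.0 - 2.0 * delta) + delta;
  }
  const double prob = 1.0 / (1.0 + std::exp(-logit));
  return -(label * std::log(prob) + (1.0 - label) * std::log(1.0 - prob));
}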
paddle/phi/kernels/gpu/lstsq_kernel.cu
@@ -23,7 +23,7 @@
#include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/impl/lstsq_kernel_impl.h"
#include "paddle/phi/kernels/impl/qr_kernel_impl.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"
#include "paddle/phi/kernels/lstsq_kernel.h"
#include "paddle/phi/kernels/matmul_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h"
...
@@ -110,7 +110,7 @@ void LstsqKernel(const Context& dev_ctx,
    DenseTensor* res_r = new DenseTensor();
    res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
    dev_ctx.template Alloc<T>(res_r);
-    phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r);
+    phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);

    DenseTensor trans_y = phi::TransposeLast2Dim<T>(dev_ctx, tmp_y);
    DenseTensor slice_y =
...
@@ -135,7 +135,7 @@ void LstsqKernel(const Context& dev_ctx,
    DenseTensor* res_r = new DenseTensor();
    res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
    dev_ctx.template Alloc<T>(res_r);
-    phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r);
+    phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);

    phi::TriangularSolveKernel<T, Context>(
        dev_ctx, *res_r, *new_y, true, true, false, solution);
...
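Why LstsqKernel pairs the renamed TrilKernel with TriangularSolveKernel: extracting the triangular factor reduces the least-squares problem to a triangular system. In LaTeX, for a reduced QR factorization $A = QR$ with $Q$ having orthonormal columns,

\[
  \lVert Ax - b \rVert_2^2
  = \lVert Rx - Q^{\top} b \rVert_2^2
  + \lVert (I - QQ^{\top})\, b \rVert_2^2 ,
\]

and the second term does not depend on $x$, so minimizing over $x$ amounts to solving the triangular system $R\,x = Q^{\top} b$, which is exactly the TriangularSolveKernel call above.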
paddle/phi/kernels/gpu/where_index_kernel.cu → paddle/phi/kernels/gpu/nonzero_kernel.cu
@@ -25,7 +25,7 @@ namespace cub = hipcub;
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/select_impl.cu.h"
-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

namespace phi {

template <typename MaskT, typename IndexT, typename OutT>
...
@@ -62,9 +62,9 @@ struct IndexFunctor {
};

template <typename T, typename Context>
-void WhereIndexKernel(const Context &dev_ctx,
+void NonZeroKernel(const Context &dev_ctx,
                    const DenseTensor &condition,
                    DenseTensor *out) {
  DenseTensor in_data;
  auto dims = condition.dims();
  using Functor = IndexFunctor<T, int64_t, int64_t>;
...
@@ -74,10 +74,10 @@ void WhereIndexKernel(const Context &dev_ctx,
}
}  // namespace phi

-PD_REGISTER_KERNEL(where_index,
+PD_REGISTER_KERNEL(nonzero,
                   GPU,
                   ALL_LAYOUT,
-                   phi::WhereIndexKernel,
+                   phi::NonZeroKernel,
                   int64_t,
                   int,
                   int16_t,
...
paddle/phi/kernels/gpu/reduce_prod_grad_kernel.cu → paddle/phi/kernels/gpu/prod_grad_kernel.cu
@@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"

PD_REGISTER_KERNEL(prod_grad,
                   GPU,
                   ALL_LAYOUT,
-                   phi::ReduceProdGradKernel,
+                   phi::ProdGradKernel,
                   float,
                   double,
                   int,
...
paddle/phi/kernels/gpu/qr_kernel.cu
@@ -31,7 +31,7 @@
#include "paddle/phi/kernels/qr_kernel.h"
#include "paddle/phi/kernels/slice_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

namespace phi {
...
@@ -103,12 +103,12 @@ void QrKernel(const Context& ctx,
      auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
      auto sliced_qr = SliceKernel<T, Context>(
          ctx, trans_qr, {trans_qr.dims().size() - 2}, {0}, {min_mn}, {1}, {});
-      auto tmp_r = TrilTriu<T, Context>(ctx, sliced_qr, 0, false);
+      auto tmp_r = Tril<T, Context>(ctx, sliced_qr, 0, false);
      // Transpose 'tmp_r' to retore the original row-major order
      phi::Copy(ctx, tmp_r, r->place(), false, r);
    } else {
      auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
-      auto tmp_r = TrilTriu<T, Context>(ctx, trans_qr, 0, false);
+      auto tmp_r = Tril<T, Context>(ctx, trans_qr, 0, false);
      // Transpose 'tmp_r' to retore the original row-major order
      phi::Copy(ctx, tmp_r, r->place(), false, r);
    }
  }
...
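A note on the lower=false argument threaded through both branches: in a factorization $A = QR$ the factor $R$ is upper triangular, i.e.

\[
  R_{ij} = 0 \quad \text{for } i > j ,
\]

so masking the transposed, sliced solver output with an upper-triangular selection (what Tril computes when lower is false) recovers $R$ exactly; the surrounding transposes only restore row-major order.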
paddle/phi/kernels/gpu/tril_triu_kernel.cu → paddle/phi/kernels/gpu/tril_grad_kernel.cu
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu,
+PD_REGISTER_KERNEL(tril_grad,
                   GPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuKernel,
+                   phi::TrilGradKernel,
                   bool,
                   float,
                   double,
...
paddle/phi/kernels/gpu/tril_triu_grad_kernel.cu → paddle/phi/kernels/gpu/tril_kernel.cu
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu_grad,
+PD_REGISTER_KERNEL(tril,
                   GPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuGradKernel,
+                   phi::TrilKernel,
                   bool,
                   float,
                   double,
...
paddle/phi/kernels/gpu/uniform_random_inplace_grad_kernel.cu → paddle/phi/kernels/gpu/uniform_inplace_grad_kernel.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/full_kernel.h"
...
@@ -20,15 +20,15 @@ limitations under the License. */
namespace phi {

template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
+void UniformInplaceGradKernel(const Context& ctx,
                              const DenseTensor& out_grad,
                              float min,
                              float max,
                              int seed,
                              int diag_num,
                              int diag_step,
                              float diag_val,
                              DenseTensor* x_grad) {
  auto dims = vectorize(x_grad->dims());
  float value = static_cast<float>(0.0f);
  phi::FullKernel<T>(ctx, dims, value, phi::DataType::UNDEFINED, x_grad);
...
@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace_grad,
+PD_REGISTER_KERNEL(uniform_inplace_grad,
                   GPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomInplaceGradKernel,
+                   phi::UniformInplaceGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/gpu/uniform_random_inplace_kernel.cu → paddle/phi/kernels/gpu/uniform_inplace_kernel.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_kernel.h"

#include <thrust/random.h>
...
@@ -54,15 +54,15 @@ struct UniformGenerator {
};

template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
+void UniformInplaceKernel(const Context& ctx,
                          const DenseTensor& x,
                          float min,
                          float max,
                          int seed,
                          int diag_num,
                          int diag_step,
                          float diag_val,
                          DenseTensor* out) {
  ctx.template Alloc<T>(out);
  if (seed == 0) {
    // Use global Generator seed
...
@@ -80,9 +80,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace,
+PD_REGISTER_KERNEL(uniform_inplace,
                   GPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomInplaceKernel,
+                   phi::UniformInplaceKernel,
                   float,
                   double) {}
paddle/phi/kernels/gpu/uniform_random_kernel.cu → paddle/phi/kernels/gpu/uniform_kernel.cu
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

#include <thrust/random.h>
...
@@ -54,16 +54,16 @@ struct UniformGenerator {
};

template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                      const IntArray& shape,
                      DataType dtype,
                      const Scalar& min,
                      const Scalar& max,
                      int seed,
                      int diag_num,
                      int diag_step,
                      float diag_val,
                      DenseTensor* out) {
  out->Resize(phi::make_ddim(shape.GetData()));
  dev_ctx.template Alloc<T>(out);
  if (seed == 0) {
...
@@ -86,10 +86,10 @@ void UniformRandomRawKernel(const Context& dev_ctx,
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw,
+PD_REGISTER_KERNEL(uniform_raw,
                   GPU,
                   ALL_LAYOUT,
-                   phi::UniformRandomRawKernel,
+                   phi::UniformRawKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h
deleted 100644 → 0

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& w,
                                   const DenseTensor& label,
                                   const paddle::optional<DenseTensor>& path,
                                   const paddle::optional<DenseTensor>& code,
                                   const paddle::optional<DenseTensor>& bias,
                                   const DenseTensor& pre_out,
                                   const DenseTensor& out_grad,
                                   int num_classes,
                                   bool remote_prefetch,
                                   int trainer_id,
                                   const std::vector<int64_t>& height_sections,
                                   const std::vector<std::string>& epmap,
                                   const std::vector<std::string>& table_names,
                                   bool is_sparse,
                                   DenseTensor* x_grad,
                                   DenseTensor* w_grad,
                                   DenseTensor* bias_grad);

}  // namespace phi
paddle/phi/kernels/hierarchical_sigmoid_kernel.h
deleted 100644 → 0

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& w,
                               const DenseTensor& label,
                               const paddle::optional<DenseTensor>& path,
                               const paddle::optional<DenseTensor>& code,
                               const paddle::optional<DenseTensor>& bias,
                               int num_classes,
                               bool remote_prefetch,
                               int trainer_id,
                               const std::vector<int64_t>& height_sections,
                               const std::vector<std::string>& epmap,
                               const std::vector<std::string>& table_names,
                               bool is_sparse,
                               DenseTensor* out,
                               DenseTensor* pre_out,
                               DenseTensor* w_out);

}  // namespace phi
paddle/phi/kernels/yolov3_loss_grad_kernel.h → paddle/phi/kernels/hsigmoid_loss_grad_kernel.h
@@ -19,24 +19,24 @@
namespace phi {

template <typename T, typename Context>
-void Yolov3LossGradKernel(const Context& dev_ctx,
-                          const DenseTensor& x,
-                          const DenseTensor& gt_box,
-                          const DenseTensor& gt_label,
-                          const paddle::optional<DenseTensor>& gt_score,
-                          const DenseTensor& objectness_mask,
-                          const DenseTensor& gt_match_mask,
-                          const DenseTensor& loss_grad,
-                          const std::vector<int>& anchors,
-                          const std::vector<int>& anchor_mask,
-                          int class_num,
-                          float ignore_thresh,
-                          int downsample_ratio,
-                          bool use_label_smooth,
-                          float scale_x_y,
-                          DenseTensor* x_grad,
-                          DenseTensor* gt_box_grad,
-                          DenseTensor* gt_label_grad,
-                          DenseTensor* gt_score_grad);
+void HSigmoidLossGradKernel(const Context& ctx,
+                            const DenseTensor& x,
+                            const DenseTensor& w,
+                            const DenseTensor& label,
+                            const paddle::optional<DenseTensor>& path,
+                            const paddle::optional<DenseTensor>& code,
+                            const paddle::optional<DenseTensor>& bias,
+                            const DenseTensor& pre_out,
+                            const DenseTensor& out_grad,
+                            int num_classes,
+                            bool remote_prefetch,
+                            int trainer_id,
+                            const std::vector<int64_t>& height_sections,
+                            const std::vector<std::string>& epmap,
+                            const std::vector<std::string>& table_names,
+                            bool is_sparse,
+                            DenseTensor* x_grad,
+                            DenseTensor* w_grad,
+                            DenseTensor* bias_grad);

}  // namespace phi
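For reference, the gradient that the renamed HSigmoidLossGradKernel propagates through pre_out: each internal node on the code path contributes a binary cross-entropy term with logit $z$ (the kernel's pre_out entry) and bit target $t$, whose derivative is

\[
  \frac{\partial \ell}{\partial z} = \sigma(z) - t ,
  \qquad \sigma(z) = \frac{1}{1 + e^{-z}} ,
\]

which then flows into x_grad, w_grad, and bias_grad through $z = w^{\top} x + b$.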
paddle/phi/kernels/hsigmoid_loss_kernel.h
0 → 100644

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HSigmoidLossKernel(const Context& ctx,
                        const DenseTensor& x,
                        const DenseTensor& w,
                        const DenseTensor& label,
                        const paddle::optional<DenseTensor>& path,
                        const paddle::optional<DenseTensor>& code,
                        const paddle::optional<DenseTensor>& bias,
                        int num_classes,
                        bool remote_prefetch,
                        int trainer_id,
                        const std::vector<int64_t>& height_sections,
                        const std::vector<std::string>& epmap,
                        const std::vector<std::string>& table_names,
                        bool is_sparse,
                        DenseTensor* out,
                        DenseTensor* pre_out,
                        DenseTensor* w_out);

}  // namespace phi
paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h → paddle/phi/kernels/impl/prod_grad_kernel_impl.h
@@ -17,19 +17,19 @@
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/kernels/funcs/reduce_functor.h"
#include "paddle/phi/kernels/impl/reduce_grad.h"
-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

namespace phi {

template <typename T, typename Context>
-void ReduceProdGradKernel(const Context& dev_ctx,
+void ProdGradKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& out,
                    const DenseTensor& out_grad,
                    const IntArray& dims,
                    bool keep_dim,
                    bool reduce_all,
                    DenseTensor* x_grad) {
  ReduceGradKernel<Context, T, funcs::ProdGradFunctor>(
      dev_ctx, x, out, out_grad, dims.GetData(), keep_dim, reduce_all, x_grad);
}
...
paddle/phi/kernels/impl/qr_grad_kernel_impl.h
@@ -29,7 +29,7 @@
#include "paddle/phi/kernels/slice_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h"
#include "paddle/phi/kernels/triangular_solve_kernel.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

namespace phi {
...
@@ -116,8 +116,8 @@ void QrGradKernel(const Context& ctx,
  DenseTensor M_tmp1 = Subtract<T, Context>(ctx, R_term, Q_term);

  // Compute M = (tril(M) + tril(M).mH()) * 0.5 Identity
-  DenseTensor M_tril_0 = TrilTriu<T, Context>(ctx, M_tmp1, 0, true);
-  DenseTensor M_tril_1 = TrilTriu<T, Context>(ctx, M_tmp1, -1, true);
+  DenseTensor M_tril_0 = Tril<T, Context>(ctx, M_tmp1, 0, true);
+  DenseTensor M_tril_1 = Tril<T, Context>(ctx, M_tmp1, -1, true);
  DenseTensor M = Add<T, Context>(
      ctx, M_tril_0, TransposeLast2Dim<T, Context>(ctx, M_tril_1));
...
paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h → paddle/phi/kernels/impl/tril_grad_kernel_impl.h
@@ -16,16 +16,16 @@
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/tril_triu_compute.h"
-#include "paddle/phi/kernels/tril_triu_grad_kernel.h"
+#include "paddle/phi/kernels/tril_grad_kernel.h"

namespace phi {

template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
+void TrilGradKernel(const Context& ctx,
                    const DenseTensor& out_grad,
                    int diagonal,
                    bool lower,
                    DenseTensor* x_grad) {
  const auto* dout_data = out_grad.data<T>();
  auto* dx_data = ctx.template Alloc<T>(x_grad);
...
paddle/phi/kernels/impl/tril_triu_kernel_impl.h → paddle/phi/kernels/impl/tril_kernel_impl.h
@@ -16,16 +16,16 @@
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/tril_triu_compute.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

namespace phi {

template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
+void TrilKernel(const Context& ctx,
                const DenseTensor& x,
                int diagonal,
                bool lower,
                DenseTensor* out) {
  const auto* x_data = x.data<T>();
  auto* out_data = ctx.template Alloc<T>(out);
...
paddle/phi/kernels/kps/reduce_prod_kernel.cu → paddle/phi/kernels/kps/prod_kernel.cu
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/reduce.h"
...
paddle/phi/kernels/where_index_kernel.h → paddle/phi/kernels/nonzero_kernel.h
@@ -19,8 +19,8 @@
namespace phi {

template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
-                      const DenseTensor& condition,
-                      DenseTensor* out);
+void NonZeroKernel(const Context& dev_ctx,
+                   const DenseTensor& condition,
+                   DenseTensor* out);

}  // namespace phi
paddle/phi/kernels/reduce_prod_grad_kernel.h → paddle/phi/kernels/prod_grad_kernel.h
@@ -20,12 +20,12 @@
namespace phi {

template <typename T, typename Context>
-void ReduceProdGradKernel(const Context& dev_ctx,
+void ProdGradKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& out,
                    const DenseTensor& out_grad,
                    const IntArray& dims,
                    bool keep_dim,
                    bool reduce_all,
                    DenseTensor* x_grad);

}  // namespace phi
paddle/phi/kernels/reduce_prod_kernel.cc → paddle/phi/kernels/prod_kernel.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/kernel_registry.h"
...
paddle/phi/kernels/reduce_prod_kernel.h → paddle/phi/kernels/prod_kernel.h
File moved
paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc → paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.cc
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h"
+#include "paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h"

#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h"
+#include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"

namespace phi {
namespace sr {
...
@@ -36,25 +36,25 @@ static std::vector<int64_t> PathToRows(const DenseTensor& path) {
}

template <typename T, typename Context>
-void HierarchicalSigmoidGradKernel(const Context& ctx,
+void HSigmoidLossGradKernel(const Context& ctx,
                             const DenseTensor& x,
                             const DenseTensor& w,
                             const DenseTensor& label,
                             const paddle::optional<DenseTensor>& path,
                             const paddle::optional<DenseTensor>& code,
                             const paddle::optional<DenseTensor>& bias,
                             const DenseTensor& pre_out,
                             const DenseTensor& out_grad,
                             int num_classes,
                             bool remote_prefetch,
                             int trainer_id,
                             const std::vector<int64_t>& height_sections,
                             const std::vector<std::string>& epmap,
                             const std::vector<std::string>& table_names,
                             bool is_sparse,
                             DenseTensor* x_grad,
                             SelectedRows* w_grad,
                             DenseTensor* bias_grad) {
  PADDLE_ENFORCE_NOT_NULL(
      path.get_ptr(),
      errors::NotFound("Custom tree must be set for sparse mode!"));
...
@@ -66,34 +66,34 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
  phi::DDim temp_dim(w.dims());
  temp_dim[0] = real_rows.size();
  w_grad_value->Resize(temp_dim);
-  phi::HierarchicalSigmoidGradKernelImpl<T>(ctx,
+  phi::HSigmoidLossGradKernelImpl<T>(ctx,
                                     x,
                                     w,
                                     label,
                                     path,
                                     code,
                                     bias,
                                     pre_out,
                                     out_grad,
                                     num_classes,
                                     remote_prefetch,
                                     trainer_id,
                                     height_sections,
                                     epmap,
                                     table_names,
                                     is_sparse,
                                     x_grad,
                                     w_grad_value,
                                     bias_grad,
                                     w_grad);
}

}  // namespace sr
}  // namespace phi

-PD_REGISTER_KERNEL(hierarchical_sigmoid_grad_sr,
+PD_REGISTER_KERNEL(hsigmoid_loss_grad_sr,
                   CPU,
                   ALL_LAYOUT,
-                   phi::sr::HierarchicalSigmoidGradKernel,
+                   phi::sr::HSigmoidLossGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h → paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h
@@ -21,25 +21,25 @@ namespace phi {
namespace sr {

template <typename T, typename Context>
-void HierarchicalSigmoidGradKernel(const Context& ctx,
+void HSigmoidLossGradKernel(const Context& ctx,
                             const DenseTensor& x,
                             const DenseTensor& w,
                             const DenseTensor& label,
                             const paddle::optional<DenseTensor>& path,
                             const paddle::optional<DenseTensor>& code,
                             const paddle::optional<DenseTensor>& bias,
                             const DenseTensor& pre_out,
                             const DenseTensor& out_grad,
                             int num_classes,
                             bool remote_prefetch,
                             int trainer_id,
                             const std::vector<int64_t>& height_sections,
                             const std::vector<std::string>& epmap,
                             const std::vector<std::string>& table_names,
                             bool is_sparse,
                             DenseTensor* x_grad,
                             SelectedRows* w_grad,
                             DenseTensor* bias_grad);

}  // namespace sr
}  // namespace phi
paddle/phi/kernels/selected_rows/uniform_random_kernel.cc → paddle/phi/kernels/selected_rows/uniform_kernel.cc
@@ -12,95 +12,85 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/phi/kernels/selected_rows/uniform_random_kernel.h"
+#include "paddle/phi/kernels/selected_rows/uniform_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

namespace phi {
namespace sr {

template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                      const IntArray& shape,
                      DataType dtype,
                      const Scalar& min,
                      const Scalar& max,
                      int seed,
                      int diag_num,
                      int diag_step,
                      float diag_val,
                      SelectedRows* out) {
-  phi::UniformRandomRawKernel<T>(dev_ctx,
+  phi::UniformRawKernel<T>(dev_ctx,
                           shape,
                           dtype,
                           min,
                           max,
                           seed,
                           diag_num,
                           diag_step,
                           diag_val,
                           out->mutable_value());
}

template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                   const IntArray& shape,
                   DataType dtype,
                   const Scalar& min,
                   const Scalar& max,
                   int seed,
                   SelectedRows* out) {
-  phi::UniformRandomKernel<T>(
+  phi::UniformKernel<T>(
      dev_ctx, shape, dtype, min, max, seed, out->mutable_value());
}

}  // namespace sr
}  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw_sr,
+PD_REGISTER_KERNEL(uniform_raw_sr,
                   CPU,
                   ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
+                   phi::sr::UniformRawKernel,
                   float,
                   double,
                   phi::dtype::bfloat16) {}

-PD_REGISTER_KERNEL(uniform_random_sr,
+PD_REGISTER_KERNEL(uniform_sr,
                   CPU,
                   ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
+                   phi::sr::UniformKernel,
                   float,
                   double,
                   phi::dtype::bfloat16) {}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(uniform_random_raw_sr,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    uniform_raw_sr, GPU, ALL_LAYOUT, phi::sr::UniformRawKernel, float, double) {
+}

-PD_REGISTER_KERNEL(uniform_random_sr,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    uniform_sr, GPU, ALL_LAYOUT, phi::sr::UniformKernel, float, double) {}
#endif

#if defined(PADDLE_WITH_XPU)
-PD_REGISTER_KERNEL(uniform_random_raw_sr,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
-                   float) {}
+PD_REGISTER_KERNEL(
+    uniform_raw_sr, XPU, ALL_LAYOUT, phi::sr::UniformRawKernel, float) {}

-PD_REGISTER_KERNEL(uniform_random_sr,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
-                   float) {}
+PD_REGISTER_KERNEL(uniform_sr, XPU, ALL_LAYOUT, phi::sr::UniformKernel, float) {
+}
#endif
paddle/phi/kernels/selected_rows/uniform_random_kernel.h → paddle/phi/kernels/selected_rows/uniform_kernel.h
@@ -22,25 +22,25 @@ namespace phi {
namespace sr {

template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                      const IntArray& shape,
                      DataType dtype,
                      const Scalar& min,
                      const Scalar& max,
                      int seed,
                      int diag_num,
                      int diag_step,
                      float diag_val,
                      SelectedRows* out);

template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                   const IntArray& shape,
                   DataType dtype,
                   const Scalar& min,
                   const Scalar& max,
                   int seed,
                   SelectedRows* out);

}  // namespace sr
}  // namespace phi
paddle/phi/kernels/tril_triu_grad_kernel.h → paddle/phi/kernels/tril_grad_kernel.h
@@ -19,10 +19,10 @@
namespace phi {

template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
+void TrilGradKernel(const Context& ctx,
                    const DenseTensor& out_grad,
                    int diagonal,
                    bool lower,
                    DenseTensor* x_grad);

}  // namespace phi
paddle/phi/kernels/tril_triu_kernel.h → paddle/phi/kernels/tril_kernel.h
@@ -20,21 +20,21 @@
namespace phi {

template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
+void TrilKernel(const Context& ctx,
                const DenseTensor& x,
                int diagonal,
                bool lower,
                DenseTensor* out);

template <typename T, typename Context>
-DenseTensor TrilTriu(const Context& ctx,
+DenseTensor Tril(const Context& ctx,
                 const DenseTensor& x,
                 int diagonal,
                 bool lower) {
  DenseTensor dense_out;
  MetaTensor meta_out(&dense_out);
-  TrilTriuInferMeta(x, diagonal, lower, &meta_out);
+  TrilInferMeta(x, diagonal, lower, &meta_out);
-  TrilTriuKernel<T, Context>(ctx, x, diagonal, lower, &dense_out);
+  TrilKernel<T, Context>(ctx, x, diagonal, lower, &dense_out);
  return dense_out;
}
...
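A minimal usage sketch of the renamed convenience wrapper declared above. This assumes a configured phi::CPUContext somewhere inside a Paddle build; the wrapper function and its name are illustrative only, showing the call shape of phi::Tril after the rename.

// Sketch only: call the Tril convenience wrapper (diagonal = 0, lower = true
// zeroes everything strictly above the main diagonal); shape and allocation
// of the result are handled internally via TrilInferMeta.
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/kernels/tril_kernel.h"

phi::DenseTensor LowerTriangle(const phi::CPUContext& ctx,
                               const phi::DenseTensor& x) {
  return phi::Tril<float, phi::CPUContext>(ctx, x, /*diagonal=*/0,
                                           /*lower=*/true);
}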
paddle/phi/kernels/uniform_random_inplace_kernel.h → paddle/phi/kernels/uniform_inplace_grad_kernel.h
@@ -19,14 +19,14 @@ limitations under the License. */
namespace phi {

template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
-                                const DenseTensor& x,
+void UniformInplaceGradKernel(const Context& ctx,
+                              const DenseTensor& out_grad,
                              float min,
                              float max,
                              int seed,
                              int diag_num,
                              int diag_step,
                              float diag_val,
-                                DenseTensor* out);
+                              DenseTensor* x_grad);

}  // namespace phi
paddle/phi/kernels/uniform_random_inplace_grad_kernel.h → paddle/phi/kernels/uniform_inplace_kernel.h
@@ -19,14 +19,14 @@ limitations under the License. */
namespace phi {

template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
-                                    const DenseTensor& out_grad,
+void UniformInplaceKernel(const Context& ctx,
+                          const DenseTensor& x,
                          float min,
                          float max,
                          int seed,
                          int diag_num,
                          int diag_step,
                          float diag_val,
-                                    DenseTensor* x_grad);
+                          DenseTensor* out);

}  // namespace phi
paddle/phi/kernels/uniform_random_kernel.cc → paddle/phi/kernels/uniform_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
...
@@ -29,38 +29,36 @@
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
-                         const IntArray& shape,
-                         DataType dtype,
-                         const Scalar& min,
-                         const Scalar& max,
-                         int seed,
-                         DenseTensor* out) {
-  UniformRandomRawKernel<T>(
-      dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
+void UniformKernel(const Context& dev_ctx,
+                   const IntArray& shape,
+                   DataType dtype,
+                   const Scalar& min,
+                   const Scalar& max,
+                   int seed,
+                   DenseTensor* out) {
+  UniformRawKernel<T>(
+      dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
 }

 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random,
+PD_REGISTER_KERNEL(uniform,
                    CPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomKernel,
+                   phi::UniformKernel,
                    float,
                    double,
                    phi::dtype::bfloat16) {}

 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(uniform_random,
+PD_REGISTER_KERNEL(uniform,
                    GPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomKernel,
+                   phi::UniformKernel,
                    float,
                    double,
                    phi::dtype::float16) {}
 #endif

 #ifdef PADDLE_WITH_XPU
-PD_REGISTER_KERNEL(
-    uniform_random, XPU, ALL_LAYOUT, phi::UniformRandomKernel, float) {}
+PD_REGISTER_KERNEL(
+    uniform, XPU, ALL_LAYOUT, phi::UniformKernel, float) {}
 #endif
paddle/phi/kernels/uniform_random_kernel.h → paddle/phi/kernels/uniform_kernel.h
View file @ 399047d7
...
@@ -22,24 +22,24 @@
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
-                            const IntArray& shape,
-                            DataType dtype,
-                            const Scalar& min,
-                            const Scalar& max,
-                            int seed,
-                            int diag_num,
-                            int diag_step,
-                            float diag_val,
-                            DenseTensor* out);
+void UniformRawKernel(const Context& dev_ctx,
+                      const IntArray& shape,
+                      DataType dtype,
+                      const Scalar& min,
+                      const Scalar& max,
+                      int seed,
+                      int diag_num,
+                      int diag_step,
+                      float diag_val,
+                      DenseTensor* out);

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
-                         const IntArray& shape,
-                         DataType dtype,
-                         const Scalar& min,
-                         const Scalar& max,
-                         int seed,
-                         DenseTensor* out);
+void UniformKernel(const Context& dev_ctx,
+                   const IntArray& shape,
+                   DataType dtype,
+                   const Scalar& min,
+                   const Scalar& max,
+                   int seed,
+                   DenseTensor* out);

 }  // namespace phi
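As a rough illustration (not from the patch), a direct call to the renamed dense kernel might look like the sketch below. In practice this kernel is normally reached through the registry rather than called by hand, and the Context here is assumed to be fully initialized.

// Hypothetical direct call to the renamed phi::UniformKernel; a sketch only,
// matching the header declaration above but with illustrative arguments.
#include "paddle/phi/kernels/uniform_kernel.h"

template <typename T, typename Context>
void FillUniform(const Context& dev_ctx, phi::DenseTensor* out) {
  phi::UniformKernel<T, Context>(dev_ctx,
                                 phi::IntArray({2, 3}),   // shape
                                 phi::DataType::FLOAT32,  // dtype
                                 phi::Scalar(-1.0f),      // min
                                 phi::Scalar(1.0f),       // max
                                 /*seed=*/0,
                                 out);
}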
paddle/phi/kernels/xpu/where_index_kernel.cc → paddle/phi/kernels/xpu/nonzero_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/device/xpu/xpu_header.h"
...
@@ -22,9 +22,9 @@
 namespace phi {

 template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
-                      const DenseTensor& condition,
-                      DenseTensor* out) {
+void NonZeroKernel(const Context& dev_ctx,
+                   const DenseTensor& condition,
+                   DenseTensor* out) {
   const T* cond_data = condition.data<T>();
   auto numel = condition.numel();
   auto dims = condition.dims();
...
@@ -69,4 +69,4 @@ void WhereIndexKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    where_index, XPU, ALL_LAYOUT, phi::WhereIndexKernel, int, bool, float) {}
+PD_REGISTER_KERNEL(
+    nonzero, XPU, ALL_LAYOUT, phi::NonZeroKernel, int, bool, float) {}
paddle/phi/kernels/xpu/reduce_prod_kernel.cc → paddle/phi/kernels/xpu/prod_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
...
paddle/phi/kernels/xpu/tril_triu_grad_kernel.cc → paddle/phi/kernels/xpu/tril_grad_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/tril_triu_grad_kernel.h"
+#include "paddle/phi/kernels/tril_grad_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/core/kernel_registry.h"
...
@@ -20,11 +20,11 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
-                        const DenseTensor& out_grad,
-                        int diagonal,
-                        bool lower,
-                        DenseTensor* x_grad) {
+void TrilGradKernel(const Context& ctx,
+                    const DenseTensor& out_grad,
+                    int diagonal,
+                    bool lower,
+                    DenseTensor* x_grad) {
   using XPUType = typename XPUTypeTrait<T>::Type;
   ctx.template Alloc<T>(x_grad);
   auto dy_shape = vectorize<int>(out_grad.dims());
...
@@ -49,4 +49,4 @@ void TrilTriuGradKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    tril_triu_grad, XPU, ALL_LAYOUT, phi::TrilTriuGradKernel, int, float) {}
+PD_REGISTER_KERNEL(
+    tril_grad, XPU, ALL_LAYOUT, phi::TrilGradKernel, int, float) {}
paddle/phi/kernels/xpu/tril_triu_kernel.cc → paddle/phi/kernels/xpu/tril_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/core/kernel_registry.h"
...
@@ -20,11 +20,11 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
-                    const DenseTensor& x,
-                    int diagonal,
-                    bool lower,
-                    DenseTensor* out) {
+void TrilKernel(const Context& ctx,
+                const DenseTensor& x,
+                int diagonal,
+                bool lower,
+                DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
   ctx.template Alloc<T>(out);
   auto xshape = vectorize<int>(x.dims());
...
@@ -48,5 +48,4 @@ void TrilTriuKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    tril_triu, XPU, ALL_LAYOUT, phi::TrilTriuKernel, int, float) {}
+PD_REGISTER_KERNEL(tril, XPU, ALL_LAYOUT, phi::TrilKernel, int, float) {}
paddle/phi/kernels/xpu/uniform_random_kernel.cc → paddle/phi/kernels/xpu/uniform_kernel.cc
View file @ 399047d7
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include <string>
...
@@ -24,16 +24,16 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context &dev_ctx,
-                            const IntArray &shape,
-                            DataType dtype,
-                            const Scalar &min,
-                            const Scalar &max,
-                            int seed,
-                            int diag_num,
-                            int diag_step,
-                            float diag_val,
-                            DenseTensor *out) {
+void UniformRawKernel(const Context &dev_ctx,
+                      const IntArray &shape,
+                      DataType dtype,
+                      const Scalar &min,
+                      const Scalar &max,
+                      int seed,
+                      int diag_num,
+                      int diag_step,
+                      float diag_val,
+                      DenseTensor *out) {
   out->Resize(phi::make_ddim(shape.GetData()));
   T *data = dev_ctx.template Alloc<T>(out);
   int64_t size = out->numel();
...
@@ -76,5 +76,5 @@ void UniformRandomRawKernel(const Context &dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    uniform_random_raw, XPU, ALL_LAYOUT, phi::UniformRandomRawKernel, float) {
-}
+PD_REGISTER_KERNEL(
+    uniform_raw, XPU, ALL_LAYOUT, phi::UniformRawKernel, float) {
+}
paddle/phi/kernels/yolo_loss_grad_kernel.h 0 → 100644
View file @ 399047d7

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void YoloLossGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& gt_box,
                        const DenseTensor& gt_label,
                        const paddle::optional<DenseTensor>& gt_score,
                        const DenseTensor& objectness_mask,
                        const DenseTensor& gt_match_mask,
                        const DenseTensor& loss_grad,
                        const std::vector<int>& anchors,
                        const std::vector<int>& anchor_mask,
                        int class_num,
                        float ignore_thresh,
                        int downsample_ratio,
                        bool use_label_smooth,
                        float scale_x_y,
                        DenseTensor* x_grad,
                        DenseTensor* gt_box_grad,
                        DenseTensor* gt_label_grad,
                        DenseTensor* gt_score_grad);

}  // namespace phi
paddle/phi/kernels/yolo_loss_kernel.h 0 → 100644
View file @ 399047d7

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void YoloLossKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& gt_box,
                    const DenseTensor& gt_label,
                    const paddle::optional<DenseTensor>& gt_score,
                    const std::vector<int>& anchors,
                    const std::vector<int>& anchor_mask,
                    int class_num,
                    float ignore_thresh,
                    int downsample_ratio,
                    bool use_label_smooth,
                    float scale_x_y,
                    DenseTensor* loss,
                    DenseTensor* objectness_mask,
                    DenseTensor* gt_match_mask);

}  // namespace phi
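The two new headers pair up: the objectness_mask and gt_match_mask produced by the forward kernel are exactly the tensors that YoloLossGradKernel (above) consumes alongside the incoming loss gradient. A hypothetical call-site sketch, not part of the patch; the attribute values and the tensor setup are illustrative only, and paddle::none is assumed to be the empty value for paddle::optional.

// Illustrative forward call of the renamed yolo loss kernel (a sketch, with
// made-up attribute values; tensor initialization is elided).
#include "paddle/phi/kernels/yolo_loss_kernel.h"

template <typename T, typename Context>
void YoloLossForwardSketch(const Context& dev_ctx,
                           const phi::DenseTensor& x,
                           const phi::DenseTensor& gt_box,
                           const phi::DenseTensor& gt_label,
                           const std::vector<int>& anchors,
                           const std::vector<int>& anchor_mask) {
  phi::DenseTensor loss, objectness_mask, gt_match_mask;
  phi::YoloLossKernel<T, Context>(dev_ctx, x, gt_box, gt_label,
                                  paddle::none,  // gt_score is optional
                                  anchors, anchor_mask,
                                  /*class_num=*/80, /*ignore_thresh=*/0.7f,
                                  /*downsample_ratio=*/32,
                                  /*use_label_smooth=*/true,
                                  /*scale_x_y=*/1.0f,
                                  &loss, &objectness_mask, &gt_match_mask);
  // objectness_mask and gt_match_mask would then be fed, together with the
  // gradient of loss, into YoloLossGradKernel for the backward pass.
}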
paddle/phi/kernels/yolov3_loss_kernel.h deleted 100644 → 0
View file @ 957fbb02

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void Yolov3LossKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& gt_box,
                      const DenseTensor& gt_label,
                      const paddle::optional<DenseTensor>& gt_score,
                      const std::vector<int>& anchors,
                      const std::vector<int>& anchor_mask,
                      int class_num,
                      float ignore_thresh,
                      int downsample_ratio,
                      bool use_label_smooth,
                      float scale_x_y,
                      DenseTensor* loss,
                      DenseTensor* objectness_mask,
                      DenseTensor* gt_match_mask);

}  // namespace phi
paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc
View file @ 399047d7
...
@@ -18,7 +18,7 @@ namespace phi {

 KernelSignature HierarchicalSigmoidOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("hierarchical_sigmoid",
+  return KernelSignature("hsigmoid_loss",
                          {"X", "W", "Label", "PathTable", "PathCode", "Bias"},
                          {"num_classes",
                           "remote_prefetch",
...
@@ -33,7 +33,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping(

 KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   if (ctx.IsDenseTensorOutput("W@GRAD")) {
-    return KernelSignature("hierarchical_sigmoid_grad",
+    return KernelSignature("hsigmoid_loss_grad",
                            {"X",
                             "W",
                             "Label",
...
@@ -51,7 +51,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
                             "is_sparse"},
                            {"X@GRAD", "W@GRAD", "Bias@GRAD"});
   } else if (ctx.IsSelectedRowsOutput("W@GRAD")) {
-    return KernelSignature("hierarchical_sigmoid_grad_sr",
+    return KernelSignature("hsigmoid_loss_grad_sr",
                            {"X",
                             "W",
                             "Label",
...
@@ -75,6 +75,9 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid, hsigmoid_loss);
+PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid_grad, hsigmoid_loss_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid,
                            phi::HierarchicalSigmoidOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid_grad,
...
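The same compat-layer pattern recurs in every *_sig.cc file touched by this commit: an argument-mapping function points the legacy operator at the renamed phi kernel, and PD_REGISTER_BASE_KERNEL_NAME records the rename itself. A condensed, hypothetical sketch of the pattern ("my_old_op" and "my_new_kernel" are placeholders, not real Paddle ops):

// Generic shape of the compat files in this patch; names are placeholders.
#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

KernelSignature MyOldOpArgumentMapping(const ArgumentMappingContext& ctx) {
  // Map the legacy op's inputs / attributes / outputs, by name, onto the
  // renamed phi kernel's argument slots.
  return KernelSignature("my_new_kernel", {"X"}, {"some_attr"}, {"Out"});
}

}  // namespace phi

// Record that op "my_old_op" is now served by kernel "my_new_kernel",
// then wire in the mapping function above.
PD_REGISTER_BASE_KERNEL_NAME(my_old_op, my_new_kernel);
PD_REGISTER_ARG_MAPPING_FN(my_old_op, phi::MyOldOpArgumentMapping);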
paddle/phi/ops/compat/tril_triu_sig.cc
View file @ 399047d7
...
@@ -17,16 +17,19 @@ limitations under the License. */
 namespace phi {

 KernelSignature TrilTriuOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("tril_triu", {"X"}, {"diagonal", "lower"}, {"Out"});
+  return KernelSignature("tril", {"X"}, {"diagonal", "lower"}, {"Out"});
 }

 KernelSignature TrilTriuGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "tril_triu_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
+      "tril_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
 }

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(tril_triu, tril);
+PD_REGISTER_BASE_KERNEL_NAME(tril_triu_grad, tril_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(tril_triu, phi::TrilTriuOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(tril_triu_grad, phi::TrilTriuGradOpArgumentMapping);
paddle/phi/ops/compat/uniform_random_inplace_sig.cc
View file @ 399047d7
...
@@ -18,7 +18,7 @@ namespace phi {

 KernelSignature UniformRandomInplaceOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "uniform_random_inplace",
+      "uniform_inplace",
       {"X"},
       {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
       {"Out"});
...
@@ -27,7 +27,7 @@ KernelSignature UniformRandomInplaceOpArgumentMapping(

 KernelSignature UniformRandomInplaceGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "uniform_random_inplace_grad",
+      "uniform_inplace_grad",
       {"Out@GRAD"},
       {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
       {"X@GRAD"});
...
@@ -35,6 +35,8 @@ KernelSignature UniformRandomInplaceGradOpArgumentMapping(

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(uniform_random_inplace, uniform_inplace);
+
 PD_REGISTER_ARG_MAPPING_FN(uniform_random_inplace,
                            phi::UniformRandomInplaceOpArgumentMapping);
...
paddle/phi/ops/compat/uniform_random_sig.cc
View file @ 399047d7
...
@@ -22,7 +22,7 @@ KernelSignature UniformRandomOpArgumentMapping(
   if (ctx.IsDenseTensorOutput("Out")) {
     if (diag_num) {
       if (ctx.InputSize("ShapeTensorList") > 0) {
-        return KernelSignature("uniform_random_raw",
+        return KernelSignature("uniform_raw",
                                {},
                                {"ShapeTensorList",
                                 "dtype",
...
@@ -37,7 +37,7 @@ KernelSignature UniformRandomOpArgumentMapping(
         const auto& shape =
             paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
         if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_raw",
+          return KernelSignature("uniform_raw",
                                  {},
                                  {"ShapeTensor",
                                   "dtype",
...
@@ -49,7 +49,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                   "diag_val"},
                                  {"Out"});
         } else {
-          return KernelSignature("uniform_random_raw",
+          return KernelSignature("uniform_raw",
                                  {},
                                  {"shape",
                                   "dtype",
...
@@ -65,7 +65,7 @@ KernelSignature UniformRandomOpArgumentMapping(
     } else {
       if (ctx.InputSize("ShapeTensorList") > 0) {
         return KernelSignature(
-            "uniform_random",
+            "uniform",
             {},
             {"ShapeTensorList", "dtype", "min", "max", "seed"},
             {"Out"});
...
@@ -73,22 +73,20 @@ KernelSignature UniformRandomOpArgumentMapping(
         const auto& shape =
             paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
         if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random",
-                                 {},
-                                 {"ShapeTensor", "dtype", "min", "max", "seed"},
-                                 {"Out"});
+          return KernelSignature("uniform",
+                                 {},
+                                 {"ShapeTensor", "dtype", "min", "max", "seed"},
+                                 {"Out"});
         } else {
-          return KernelSignature("uniform_random",
-                                 {},
-                                 {"shape", "dtype", "min", "max", "seed"},
-                                 {"Out"});
+          return KernelSignature(
+              "uniform", {}, {"shape", "dtype", "min", "max", "seed"}, {"Out"});
         }
       }
     }
   } else if (ctx.IsSelectedRowsOutput("Out")) {
     if (diag_num) {
       if (ctx.InputSize("ShapeTensorList") > 0) {
-        return KernelSignature("uniform_random_raw_sr",
+        return KernelSignature("uniform_raw_sr",
                                {},
                                {"ShapeTensorList",
                                 "dtype",
...
@@ -103,7 +101,7 @@ KernelSignature UniformRandomOpArgumentMapping(
         const auto& shape =
             paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
         if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_raw_sr",
+          return KernelSignature("uniform_raw_sr",
                                  {},
                                  {"ShapeTensor",
                                   "dtype",
...
@@ -115,7 +113,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                   "diag_val"},
                                  {"Out"});
         } else {
-          return KernelSignature("uniform_random_raw_sr",
+          return KernelSignature("uniform_raw_sr",
                                  {},
                                  {"shape",
                                   "dtype",
...
@@ -131,7 +129,7 @@ KernelSignature UniformRandomOpArgumentMapping(
     } else {
       if (ctx.InputSize("ShapeTensorList") > 0) {
         return KernelSignature(
-            "uniform_random_sr",
+            "uniform_sr",
             {},
             {"ShapeTensorList", "dtype", "min", "max", "seed"},
             {"Out"});
...
@@ -139,12 +137,12 @@ KernelSignature UniformRandomOpArgumentMapping(
         const auto& shape =
             paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
         if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_sr",
+          return KernelSignature("uniform_sr",
                                  {},
                                  {"ShapeTensor", "dtype", "min", "max", "seed"},
                                  {"Out"});
         } else {
-          return KernelSignature("uniform_random_sr",
+          return KernelSignature("uniform_sr",
                                  {},
                                  {"shape", "dtype", "min", "max", "seed"},
                                  {"Out"});
...
@@ -156,4 +154,6 @@ KernelSignature UniformRandomOpArgumentMapping(
 }

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(uniform_random, uniform);
+
 PD_REGISTER_ARG_MAPPING_FN(uniform_random, phi::UniformRandomOpArgumentMapping);
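Reading the branches above together: the kernel name varies along two axes (dense vs. SelectedRows output, and whether the diag_* attributes are in play), while the shape source (ShapeTensorList, ShapeTensor, or the shape attribute) only changes which input slot is listed, not the name. A paraphrase of the name selection, not code from the patch:

// Illustrative paraphrase of the name-selection logic in
// UniformRandomOpArgumentMapping; for explanation only.
#include <string>

std::string PickUniformKernelName(bool selected_rows_out, bool has_diag) {
  std::string name = "uniform";
  if (has_diag) name += "_raw";          // diag_num != 0 selects the raw variant
  if (selected_rows_out) name += "_sr";  // SelectedRows output adds the _sr suffix
  return name;  // "uniform", "uniform_raw", "uniform_sr", or "uniform_raw_sr"
}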
paddle/phi/ops/compat/where_index_sig.cc 0 → 100644
View file @ 399047d7

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

KernelSignature WhereIndexOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature("nonzero", {"Condition"}, {}, {"Out"});
}

}  // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(where_index, nonzero);

PD_REGISTER_ARG_MAPPING_FN(where_index, phi::WhereIndexOpArgumentMapping);
paddle/phi/ops/compat/yolov3_loss_sig.cc
View file @ 399047d7
...
@@ -17,7 +17,7 @@
 namespace phi {

 KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("yolov3_loss",
+  return KernelSignature("yolo_loss",
                          {"X", "GTBox", "GTLabel", "GTScore"},
                          {"anchors",
                           "anchor_mask",
...
@@ -32,7 +32,7 @@ KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {

 KernelSignature Yolov3LossGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "yolov3_loss_grad",
+      "yolo_loss_grad",
       {"X",
        "GTBox",
        "GTLabel",
...
@@ -51,6 +51,9 @@ KernelSignature Yolov3LossGradOpArgumentMapping(
 }

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss, yolo_loss);
+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss_grad, yolo_loss_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(yolov3_loss, phi::Yolov3LossOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(yolov3_loss_grad,
                            phi::Yolov3LossGradOpArgumentMapping);
python/paddle/fluid/initializer.py
View file @ 399047d7
...
@@ -309,7 +309,7 @@ class UniformInitializer(Initializer):
         if framework._non_static_mode():
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     var.shape,
                     out_dtype,
                     self._low,
...
@@ -711,7 +711,7 @@ class XavierInitializer(Initializer):
         if self._uniform:
             limit = math.sqrt(6.0 / float(fan_in + fan_out))
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     out_var.shape,
                     out_dtype,
                     -limit,
...
@@ -923,7 +923,7 @@ class MSRAInitializer(Initializer):
             gain = calculate_gain(self._nonlinearity, self._negative_slope)
             limit = gain * math.sqrt(3.0 / float(fan_in))
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     var.shape,
                     out_dtype,
                     -limit,
...
python/paddle/fluid/layers/nn.py
View file @ 399047d7
...
@@ -5385,7 +5385,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
         )
     )
     if in_dygraph_mode():
-        return _C_ops.reduce_prod(
+        return _C_ops.prod(
             input,
             dim if dim != None and dim != [] else [0],
             keep_dim,
...
@@ -15548,7 +15548,7 @@ def where(condition):
     """
     if in_dygraph_mode():
-        return _C_ops.where_index(condition)
+        return _C_ops.nonzero(condition)

     if _in_legacy_dygraph():
         return _legacy_C_ops.where_index(condition)
...
@@ -16567,7 +16567,7 @@ def uniform_random(
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
             shape,
             dtype,
             float(min),
...
python/paddle/nn/functional/loss.py
View file @ 399047d7
...
@@ -1017,7 +1017,7 @@ def hsigmoid_loss(
     #        [1.92374969]]
     """
     if in_dygraph_mode():
-        out, _, _ = _C_ops.hierarchical_sigmoid(
+        out, _, _ = _C_ops.hsigmoid_loss(
             input,
             weight,
             label,
...
python/paddle/tensor/creation.py
View file @ 399047d7
...
@@ -1097,7 +1097,7 @@ def tril(x, diagonal=0, name=None):
     #        [9 , 10, 0 , 0 ]])
     """
     if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, True)
+        return _C_ops.tril(x, diagonal, True)

     if _in_legacy_dygraph():
         op = getattr(_legacy_C_ops, 'tril_triu')
...
@@ -1163,7 +1163,7 @@ def triu(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, False)
+        return _C_ops.tril(x, diagonal, False)

     if _in_legacy_dygraph():
         op = getattr(_legacy_C_ops, 'tril_triu')
...
...
python/paddle/tensor/math.py
浏览文件 @
399047d7
...
@@ -3706,7 +3706,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
...
@@ -3706,7 +3706,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
dim
=
[
0
]
dim
=
[
0
]
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
reduce_
prod
(
x
,
dim
,
keepdim
,
reduce_all
)
return
_C_ops
.
prod
(
x
,
dim
,
keepdim
,
reduce_all
)
if
_in_legacy_dygraph
():
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
reduce_prod
(
return
_legacy_C_ops
.
reduce_prod
(
x
,
'dim'
,
dim
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
x
,
'dim'
,
dim
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
...
...
python/paddle/tensor/random.py
View file @ 399047d7
...
@@ -584,7 +584,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
             shape,
             dtype,
             float(min),
...
@@ -664,7 +664,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
         #  [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
     """
     if in_dygraph_mode():
-        return _C_ops.uniform_random_inplace_(x, min, max, seed, 0, 0, 1.0)
+        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
     else:
         return _legacy_C_ops.uniform_random_inplace_(
             x, 'min', min, 'max', max, 'seed', seed
...
python/paddle/tensor/search.py
View file @ 399047d7
...
@@ -429,7 +429,7 @@ def nonzero(x, as_tuple=False):
     rank = len(shape)

     if in_dygraph_mode():
-        outs = _C_ops.where_index(x)
+        outs = _C_ops.nonzero(x)
     elif paddle.in_dynamic_mode():
         outs = _legacy_C_ops.where_index(x)
     else:
...
python/paddle/vision/ops.py
View file @ 399047d7
...
@@ -197,7 +197,7 @@ def yolo_loss(
     """

     if in_dygraph_mode():
-        loss, _, _ = _C_ops.yolov3_loss(
+        loss, _, _ = _C_ops.yolo_loss(
             x,
             gt_box,
             gt_label,
...