Commit 399047d7 (unverified)
Authored Nov 01, 2022 by YuanRisheng; committed by GitHub on Nov 01, 2022.
[PHI]Standardise some C++ API (Part2) (#47510)
* standard_api
* add hardtanh
Parent: 957fbb02
Showing 84 changed files with 1,437 additions and 1,129 deletions (+1437 −1129).
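All 84 files apply one rename map, carried consistently through operator registrations, InferMeta functions, kernel files, YAML op definitions, compat signatures, and Python call sites:

- hierarchical_sigmoid → hsigmoid_loss (HierarchicalSigmoid* → HSigmoidLoss*)
- where_index → nonzero (WhereIndex* → NonZero*)
- tril_triu / tril_triu_grad → tril / tril_grad (TrilTriu* → Tril*)
- reduce_prod → prod (ReduceProd* → Prod*)
- uniform_random / uniform_random_raw / uniform_random_inplace → uniform / uniform_raw / uniform_inplace
- yolov3_loss → yolo_loss (Yolov3Loss* → YoloLoss*)

As a minimal sketch of the caller-side effect (the kernel name and argument order are taken from the qr/lstsq call sites in the diffs below; the wrapper function itself is illustrative, not part of this commit):

#include "paddle/phi/kernels/tril_kernel.h"  // was: tril_triu_kernel.h

namespace example {
// Extract the lower triangle of `x` under the standardized kernel name.
template <typename T, typename Context>
void LowerTriangle(const Context& ctx,
                   const phi::DenseTensor& x,
                   phi::DenseTensor* out) {
  // Old spelling: phi::TrilTriuKernel<T>(ctx, x, 0, /*lower=*/true, out);
  phi::TrilKernel<T>(ctx, x, /*diagonal=*/0, /*lower=*/true, out);
}
}  // namespace example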
Changed files:

paddle/fluid/framework/new_executor/standalone_executor_test.cc (+2 −2)
paddle/fluid/operators/detection/yolov3_loss_op.cc (+2 −2)
paddle/fluid/operators/hierarchical_sigmoid_op.cc (+1 −1)
paddle/fluid/operators/tril_triu_op.cc (+1 −1)
paddle/fluid/operators/where_index_op.cc (+1 −1)
paddle/phi/api/yaml/legacy_backward.yaml (+23 −23)
paddle/phi/api/yaml/legacy_ops.yaml (+46 −46)
paddle/phi/infermeta/backward.cc (+18 −18)
paddle/phi/infermeta/backward.h (+18 −18)
paddle/phi/infermeta/multiary.cc (+30 −30)
paddle/phi/infermeta/multiary.h (+30 −30)
paddle/phi/infermeta/ternary.cc (+58 −58)
paddle/phi/infermeta/ternary.h (+8 −8)
paddle/phi/infermeta/unary.cc (+277 −277)
paddle/phi/infermeta/unary.h (+29 −29)
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc (+0 −71)
paddle/phi/kernels/cpu/hsigmoid_loss_grad.h (+20 −21)
paddle/phi/kernels/cpu/hsigmoid_loss_grad_kernel.cc (+71 −0)
paddle/phi/kernels/cpu/hsigmoid_loss_kernel.cc (+20 −24)
paddle/phi/kernels/cpu/nonzero_kernel.cc (+6 −6)
paddle/phi/kernels/cpu/prod_grad_kernel.cc (+3 −3)
paddle/phi/kernels/cpu/prod_kernel.cc (+1 −1)
paddle/phi/kernels/cpu/tril_grad_kernel.cc (+3 −3)
paddle/phi/kernels/cpu/tril_kernel.cc (+3 −3)
paddle/phi/kernels/cpu/uniform_inplace_grad_kernel.cc (+12 −12)
paddle/phi/kernels/cpu/uniform_inplace_kernel.cc (+12 −12)
paddle/phi/kernels/cpu/uniform_kernel.cc (+13 −13)
paddle/phi/kernels/cpu/yolo_loss_functor.h (+0 −0)
paddle/phi/kernels/cpu/yolo_loss_grad_kernel.cc (+23 −27)
paddle/phi/kernels/cpu/yolo_loss_kernel.cc (+18 −18)
paddle/phi/kernels/gpu/lstsq_kernel.cu (+3 −3)
paddle/phi/kernels/gpu/nonzero_kernel.cu (+6 −6)
paddle/phi/kernels/gpu/prod_grad_kernel.cu (+3 −3)
paddle/phi/kernels/gpu/qr_kernel.cu (+3 −3)
paddle/phi/kernels/gpu/tril_grad_kernel.cu (+3 −3)
paddle/phi/kernels/gpu/tril_kernel.cu (+3 −3)
paddle/phi/kernels/gpu/uniform_inplace_grad_kernel.cu (+12 −12)
paddle/phi/kernels/gpu/uniform_inplace_kernel.cu (+12 −12)
paddle/phi/kernels/gpu/uniform_kernel.cu (+13 −13)
paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h (+0 −42)
paddle/phi/kernels/hierarchical_sigmoid_kernel.h (+0 −40)
paddle/phi/kernels/hsigmoid_loss_grad_kernel.h (+42 −0)
paddle/phi/kernels/hsigmoid_loss_kernel.h (+40 −0)
paddle/phi/kernels/impl/prod_grad_kernel_impl.h (+9 −9)
paddle/phi/kernels/impl/qr_grad_kernel_impl.h (+3 −3)
paddle/phi/kernels/impl/tril_grad_kernel_impl.h (+6 −6)
paddle/phi/kernels/impl/tril_kernel_impl.h (+6 −6)
paddle/phi/kernels/kps/prod_kernel.cu (+1 −1)
paddle/phi/kernels/nonzero_kernel.h (+3 −3)
paddle/phi/kernels/prod_grad_kernel.h (+8 −8)
paddle/phi/kernels/prod_kernel.cc (+1 −1)
paddle/phi/kernels/prod_kernel.h (+0 −0)
paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.cc (+99 −0)
paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h (+45 −0)
paddle/phi/kernels/selected_rows/uniform_kernel.cc (+96 −0)
paddle/phi/kernels/selected_rows/uniform_kernel.h (+17 −17)
paddle/phi/kernels/tril_grad_kernel.h (+5 −5)
paddle/phi/kernels/tril_kernel.h (+11 −11)
paddle/phi/kernels/uniform_inplace_grad_kernel.h (+9 −9)
paddle/phi/kernels/uniform_inplace_kernel.h (+9 −9)
paddle/phi/kernels/uniform_kernel.cc (+14 −16)
paddle/phi/kernels/uniform_kernel.h (+17 −17)
paddle/phi/kernels/xpu/nonzero_kernel.cc (+5 −5)
paddle/phi/kernels/xpu/prod_kernel.cc (+1 −1)
paddle/phi/kernels/xpu/tril_grad_kernel.cc (+7 −7)
paddle/phi/kernels/xpu/tril_kernel.cc (+7 −8)
paddle/phi/kernels/xpu/uniform_kernel.cc (+13 −13)
paddle/phi/kernels/yolo_loss_grad_kernel.h (+42 −0)
paddle/phi/kernels/yolo_loss_kernel.h (+38 −0)
paddle/phi/kernels/yolov3_loss_kernel.h (+0 −38)
paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc (+6 −3)
paddle/phi/ops/compat/tril_triu_sig.cc (+5 −2)
paddle/phi/ops/compat/uniform_random_inplace_sig.cc (+4 −2)
paddle/phi/ops/compat/uniform_random_sig.cc (+15 −15)
paddle/phi/ops/compat/where_index_sig.cc (+27 −0)
paddle/phi/ops/compat/yolov3_loss_sig.cc (+5 −2)
python/paddle/fluid/initializer.py (+3 −3)
python/paddle/fluid/layers/nn.py (+3 −3)
python/paddle/nn/functional/loss.py (+1 −1)
python/paddle/tensor/creation.py (+2 −2)
python/paddle/tensor/math.py (+1 −1)
python/paddle/tensor/random.py (+2 −2)
python/paddle/tensor/search.py (+1 −1)
python/paddle/vision/ops.py (+1 −1)
paddle/fluid/framework/new_executor/standalone_executor_test.cc
@@ -63,8 +63,8 @@ USE_OP_ITSELF(memcpy_d2h);
 USE_OP_ITSELF(fetch_v2);

 PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random_raw, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform_raw, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(transpose, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(reshape, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(split, GPU, ALL_LAYOUT);
paddle/fluid/operators/detection/yolov3_loss_op.cc
@@ -218,10 +218,10 @@ class Yolov3LossGradMaker : public framework::SingleGradOpMaker<T> {
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss,
                             Yolov3LossInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossInferMeta));
+                            PD_INFER_META(phi::YoloLossInferMeta));
 DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss_grad,
                             Yolov3LossGradInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossGradInferMeta));
+                            PD_INFER_META(phi::YoloLossGradInferMeta));
 REGISTER_OPERATOR(yolov3_loss,
                   ops::Yolov3LossOp,
                   ops::Yolov3LossOpMaker,
paddle/fluid/operators/hierarchical_sigmoid_op.cc
@@ -259,7 +259,7 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(hierarchical_sigmoid,
                             HierarchicalSigmoidInferShapeFunctor,
-                            PD_INFER_META(phi::HierarchicalSigmoidInferMeta));
+                            PD_INFER_META(phi::HSigmoidLossInferMeta));
 REGISTER_OPERATOR(hierarchical_sigmoid,
                   ops::HierarchicalSigmoidOp,
                   ops::HierarchicalSigmoidOpMaker<int>,
paddle/fluid/operators/tril_triu_op.cc
@@ -93,7 +93,7 @@ namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(tril_triu,
                             TrilTriuInferShapeFunctor,
-                            PD_INFER_META(phi::TrilTriuInferMeta));
+                            PD_INFER_META(phi::TrilInferMeta));
 REGISTER_OPERATOR(tril_triu,
                   ops::TrilTriuOp,
                   ops::TrilTriuOpMaker,
paddle/fluid/operators/where_index_op.cc
@@ -48,7 +48,7 @@ class WhereIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(where_index,
                             WhereIndexInferShapeFunctor,
-                            PD_INFER_META(phi::WhereIndexInferMeta));
+                            PD_INFER_META(phi::NonZeroInferMeta));
 REGISTER_OPERATOR(where_index,
                   ops::WhereIndexOp,
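DECLARE_INFER_SHAPE_FUNCTOR rebinds the legacy operator's shape inference to the renamed phi InferMeta function, so only the phi-side name changes here. A minimal sketch of what an InferMeta like phi::NonZeroInferMeta computes (the body below is an assumption for illustration; the committed implementation lives in paddle/phi/infermeta/unary.cc, whose diff is collapsed further down):

#include "paddle/phi/core/meta_tensor.h"

namespace example {
// Assumed behavior: where_index/nonzero yields the coordinates of the
// non-zero elements as an INT64 tensor of shape [num_nonzero, rank], where
// num_nonzero (written -1 here) is only known at run time.
void NonZeroInferMetaSketch(const phi::MetaTensor& condition,
                            phi::MetaTensor* out) {
  int64_t rank = condition.dims().size();
  out->set_dims(phi::make_ddim({-1, rank}));
  out->set_dtype(phi::DataType::INT64);
}
}  // namespace example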
paddle/phi/api/yaml/legacy_backward.yaml
@@ -791,8 +791,8 @@
     func : hard_tanh_grad
   inplace : (out_grad -> x_grad)

-- backward_op : hierarchical_sigmoid_grad
-  forward : hierarchical_sigmoid (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
+- backward_op : hsigmoid_loss_grad
+  forward : hsigmoid_loss (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
   args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
   output : Tensor(x_grad), Tensor(w_grad), Tensor(bias_grad)
   infer_meta :
@@ -800,7 +800,7 @@
     param : [x, w, bias]
   optional : path, code, bias
   kernel :
-    func : hierarchical_sigmoid_grad
+    func : hsigmoid_loss_grad

 - backward_op : huber_loss_grad
   forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
@@ -1477,6 +1477,16 @@
   kernel :
     func : prelu_grad

+- backward_op : prod_grad
+  forward : prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : prod_grad
+
 - backward_op : psroi_pool_grad
   forward : psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
   args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
@@ -1516,16 +1526,6 @@
   output : Tensor(x_grad)
   invoke : real_grad_impl(out_grad, x_grad)

-- backward_op : reduce_prod_grad
-  forward : reduce_prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : prod_grad
-
 - backward_op : relu6_grad
   forward : relu6 (Tensor x, float threshold) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, float threshold)
@@ -2234,15 +2234,15 @@
   kernel :
     func : triangular_solve_grad

-- backward_op : tril_triu_grad
-  forward : tril_triu (Tensor x, int diagonal, bool lower) -> Tensor(out)
+- backward_op : tril_grad
+  forward : tril(Tensor x, int diagonal, bool lower) -> Tensor(out)
   args : (Tensor out_grad, int diagonal, bool lower)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
     param : [out_grad]
   kernel :
-    func : tril_triu_grad
+    func : tril_grad

 - backward_op : trilinear_interp_grad
   forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
@@ -2273,14 +2273,14 @@
     func : unfold_grad
   no_need_buffer : x

-- backward_op : uniform_random_inplace_grad
-  forward : uniform_random_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
+- backward_op : uniform_inplace_grad
+  forward : uniform_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
   args : (Tensor out_grad, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
   output : Tensor(x_grad)
   infer_meta :
     func : UniformRandomInplaceGradInferMeta
   kernel :
-    func : uniform_random_inplace_grad
+    func : uniform_inplace_grad
   inplace : (out_grad -> x_grad)

 - backward_op : unsqueeze_double_grad
@@ -2335,14 +2335,14 @@
     func : where_grad
   no_need_buffer : x, y

-- backward_op : yolov3_loss_grad
-  forward : yolov3_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
+- backward_op : yolo_loss_grad
+  forward : yolo_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
   args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, Tensor objectness_mask, Tensor gt_match_mask, Tensor loss_grad, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
   output : Tensor(x_grad), Tensor(gt_box_grad), Tensor(gt_label_grad), Tensor(gt_score_grad)
   infer_meta :
-    func : Yolov3LossGradInferMeta
+    func : YoloLossGradInferMeta
   kernel :
-    func : yolov3_loss_grad
+    func : yolo_loss_grad
   optional : gt_score

 - backward_op : fold_grad
paddle/phi/api/yaml/legacy_ops.yaml
@@ -1036,17 +1036,6 @@
     func : hard_tanh
   backward : hardtanh_grad

-- op : hierarchical_sigmoid
-  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
-  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
-  infer_meta :
-    func : HierarchicalSigmoidInferMeta
-  optional : path, code, bias
-  kernel :
-    func : hierarchical_sigmoid
-    data_type : x
-  backward : hierarchical_sigmoid_grad
-
 - op : histogram
   args : (Tensor input, int64_t bins, int min, int max)
   output : Tensor(out)
@@ -1055,6 +1044,17 @@
   kernel :
     func : histogram

+- op : hsigmoid_loss
+  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
+  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
+  infer_meta :
+    func : HSigmoidLossInferMeta
+  optional : path, code, bias
+  kernel :
+    func : hsigmoid_loss
+    data_type : x
+  backward : hsigmoid_loss_grad
+
 - op : huber_loss
   args : (Tensor input, Tensor label, float delta)
   output : Tensor(out), Tensor(residual)
@@ -1696,6 +1696,14 @@
     func : nms
     data_type : x

+- op : nonzero
+  args : (Tensor condition)
+  output : Tensor(out)
+  infer_meta :
+    func : NonZeroInferMeta
+  kernel :
+    func : nonzero
+
 - op : norm
   args : (Tensor x, int axis, float epsilon, bool is_test)
   output : Tensor(out), Tensor(norm)
@@ -1828,6 +1836,15 @@
   kernel :
     func : prior_box

+- op : prod
+  args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
+  output : Tensor
+  infer_meta :
+    func : ReduceIntArrayAxisInferMetaBase
+  kernel :
+    func : prod_raw
+  backward : prod_grad
+
 - op : psroi_pool
   args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
   output : Tensor
@@ -1893,15 +1910,6 @@
     func : real
   backward : real_grad

-- op : reduce_prod
-  args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
-  output : Tensor
-  infer_meta :
-    func : ReduceIntArrayAxisInferMetaBase
-  kernel :
-    func : prod_raw
-  backward : reduce_prod_grad
-
 - op : relu
   args : (Tensor x)
   output : Tensor(out)
@@ -2460,6 +2468,15 @@
     func : triangular_solve
   backward : triangular_solve_grad

+- op : tril
+  args : (Tensor x, int diagonal, bool lower)
+  output : Tensor(out)
+  infer_meta :
+    func : TrilInferMeta
+  kernel :
+    func : tril
+  backward : tril_grad
+
 - op : tril_indices
   args : (int rows, int cols, int offset, DataType dtype, Place place={})
   output : Tensor(out)
@@ -2472,15 +2489,6 @@
     data_type : dtype
   backend : place

-- op : tril_triu
-  args : (Tensor x, int diagonal, bool lower)
-  output : Tensor(out)
-  infer_meta :
-    func : TrilTriuInferMeta
-  kernel :
-    func : tril_triu
-  backward : tril_triu_grad
-
 - op : trilinear_interp
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
   output : Tensor(output)
@@ -2535,14 +2543,14 @@
     func : unfold
   backward : unfold_grad

-- op : uniform_random
+- op : uniform
   args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={})
   output : Tensor(out)
   infer_meta :
     func : UniformRandomInferMeta
     param: [shape, dtype]
   kernel :
-    func : uniform_random
+    func : uniform
     param: [shape, dtype, min, max, seed]
     data_type : dtype
     backend : place
@@ -2628,14 +2636,6 @@
     func : where
   backward : where_grad

-- op : where_index
-  args : (Tensor condition)
-  output : Tensor(out)
-  infer_meta :
-    func : WhereIndexInferMeta
-  kernel :
-    func : where_index
-
 - op : yolo_box
   args : (Tensor x, Tensor img_size, int[] anchors, int class_num, float conf_thresh, int downsample_ratio, bool clip_bbox, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5)
   output : Tensor(boxes), Tensor(scores)
@@ -2645,16 +2645,16 @@
     func : yolo_box
     data_type : x

-- op : yolov3_loss
+- op : yolo_loss
   args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
   output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
   infer_meta :
-    func : Yolov3LossInferMeta
+    func : YoloLossInferMeta
   kernel :
-    func : yolov3_loss
+    func : yolo_loss
     data_type : x
   optional : gt_score
-  backward : yolov3_loss_grad
+  backward : yolo_loss_grad

 - op : zeros
   args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
@@ -2734,16 +2734,16 @@
   intermediate : reserve
   view : (dropout_state_in -> dropout_state_out)

-- op : uniform_random_inplace
+- op : uniform_inplace
   args : (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
   output : Tensor(out)
   infer_meta :
     func : UniformRandomInplaceInferMeta
   kernel :
-    func : uniform_random_inplace
+    func : uniform_inplace
     data_type : x
   inplace : (x -> out)
-  backward : uniform_random_inplace_grad
+  backward : uniform_inplace_grad

 - op : unpool
   args : (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
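Each `- op :` entry above drives code generation for the public C++ API, and its `func :` field must match the string under which the kernel is registered with PD_REGISTER_KERNEL. A sketch of what the generated declaration for the renamed `tril` entry plausibly looks like (an assumption: the exact generated namespace and header come from the YAML codegen and are not shown in this commit):

#include "paddle/phi/api/include/tensor.h"

namespace paddle {
namespace experimental {

// Generated from `- op : tril` / args (Tensor x, int diagonal, bool lower):
// dispatches to the kernel registered under the name "tril".
Tensor tril(const Tensor& x, int diagonal, bool lower);

}  // namespace experimental
}  // namespace paddle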
paddle/phi/infermeta/backward.cc
@@ -987,7 +987,7 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
   x_grad->set_dtype(out_grad[0]->dtype());
 }

-void Yolov3LossGradInferMeta(const MetaTensor& x,
+void YoloLossGradInferMeta(const MetaTensor& x,
                              const MetaTensor& gt_box,
                              const MetaTensor& gt_label,
                              const MetaTensor& gt_score,
paddle/phi/infermeta/backward.h
@@ -385,7 +385,7 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
                           int axis,
                           MetaTensor* x_grad);

-void Yolov3LossGradInferMeta(const MetaTensor& x,
+void YoloLossGradInferMeta(const MetaTensor& x,
                             const MetaTensor& gt_box,
                             const MetaTensor& gt_label,
                             const MetaTensor& gt_score,
paddle/phi/infermeta/multiary.cc
@@ -1328,7 +1328,7 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
   out_count->set_dtype(DataType::INT32);
 }

-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
+void HSigmoidLossInferMeta(const MetaTensor& x,
                                   const MetaTensor& w,
                                   const MetaTensor& label,
                                   const MetaTensor& path,
@@ -2762,7 +2762,7 @@ void WhereInferMeta(const MetaTensor& condition,
   out->share_meta(x);
 }

-void Yolov3LossInferMeta(const MetaTensor& x,
+void YoloLossInferMeta(const MetaTensor& x,
                          const MetaTensor& gt_box,
                          const MetaTensor& gt_label,
                          const MetaTensor& gt_score,
paddle/phi/infermeta/multiary.h
@@ -288,7 +288,7 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
                                    MetaTensor* out_count,
                                    MetaTensor* out_eids);

-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
+void HSigmoidLossInferMeta(const MetaTensor& x,
                                   const MetaTensor& w,
                                   const MetaTensor& label,
                                   const MetaTensor& path,
@@ -508,7 +508,7 @@ void WhereInferMeta(const MetaTensor& condition,
                     const MetaTensor& y,
                     MetaTensor* out);

-void Yolov3LossInferMeta(const MetaTensor& x,
+void YoloLossInferMeta(const MetaTensor& x,
                          const MetaTensor& gt_box,
                          const MetaTensor& gt_label,
                          const MetaTensor& gt_score,
...
paddle/phi/infermeta/ternary.cc
浏览文件 @
399047d7
...
...
@@ -402,64 +402,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
}
}
void
SendURecvInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
src_index
,
const
MetaTensor
&
dst_index
,
const
std
::
string
&
reduce_op
,
const
IntArray
&
out_size
,
MetaTensor
*
out
,
MetaTensor
*
dst_count
)
{
auto
src_index_dims
=
src_index
.
dims
();
if
(
src_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
src_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d"
,
src_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
src_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Src_index should be 1D, when it is not 2D, but we get %d"
,
src_index_dims
.
size
()));
}
auto
dst_index_dims
=
dst_index
.
dims
();
if
(
dst_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d"
,
dst_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Dst_index should be 1D, "
"when it is not 2D, but we get %d"
,
dst_index_dims
.
size
()));
}
PADDLE_ENFORCE_EQ
(
src_index_dims
[
0
],
dst_index_dims
[
0
],
phi
::
errors
::
InvalidArgument
(
"Src_index and Dst_index should have the same shape."
));
auto
dims
=
x
.
dims
();
std
::
vector
<
int64_t
>
dims_
=
phi
::
vectorize
(
dims
);
dims_
[
0
]
=
-
1
;
out
->
set_dims
(
phi
::
make_ddim
(
dims_
));
out
->
set_dtype
(
x
.
dtype
());
if
(
reduce_op
==
"MEAN"
)
{
dst_count
->
set_dims
({
-
1
});
dst_count
->
set_dtype
(
DataType
::
INT32
);
}
}
void
GroupNormInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
scale
,
const
MetaTensor
&
bias
,
...
...
@@ -1164,6 +1106,64 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
out
->
set_dtype
(
x
.
dtype
());
}
void
SendURecvInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
src_index
,
const
MetaTensor
&
dst_index
,
const
std
::
string
&
reduce_op
,
const
IntArray
&
out_size
,
MetaTensor
*
out
,
MetaTensor
*
dst_count
)
{
auto
src_index_dims
=
src_index
.
dims
();
if
(
src_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
src_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d"
,
src_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
src_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Src_index should be 1D, when it is not 2D, but we get %d"
,
src_index_dims
.
size
()));
}
auto
dst_index_dims
=
dst_index
.
dims
();
if
(
dst_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d"
,
dst_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Dst_index should be 1D, "
"when it is not 2D, but we get %d"
,
dst_index_dims
.
size
()));
}
PADDLE_ENFORCE_EQ
(
src_index_dims
[
0
],
dst_index_dims
[
0
],
phi
::
errors
::
InvalidArgument
(
"Src_index and Dst_index should have the same shape."
));
auto
dims
=
x
.
dims
();
std
::
vector
<
int64_t
>
dims_
=
phi
::
vectorize
(
dims
);
dims_
[
0
]
=
-
1
;
out
->
set_dims
(
phi
::
make_ddim
(
dims_
));
out
->
set_dtype
(
x
.
dtype
());
if
(
reduce_op
==
"MEAN"
)
{
dst_count
->
set_dims
({
-
1
});
dst_count
->
set_dtype
(
DataType
::
INT32
);
}
}
void
SpectralNormInferMeta
(
const
MetaTensor
&
weight
,
const
MetaTensor
&
u
,
const
MetaTensor
&
v
,
...
...
paddle/phi/infermeta/ternary.h
@@ -72,14 +72,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
                            MetaTensor* saved_variance,
                            MetaConfig config = MetaConfig());

-void SendURecvInferMeta(const MetaTensor& x,
-                        const MetaTensor& src_index,
-                        const MetaTensor& dst_index,
-                        const std::string& reduce_op,
-                        const IntArray& out_size,
-                        MetaTensor* out,
-                        MetaTensor* dst_count);
-
 void GroupNormInferMeta(const MetaTensor& x,
                         const MetaTensor& scale,
                         const MetaTensor& bias,
@@ -186,6 +178,14 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& updates,
                            MetaTensor* out);

+void SendURecvInferMeta(const MetaTensor& x,
+                        const MetaTensor& src_index,
+                        const MetaTensor& dst_index,
+                        const std::string& reduce_op,
+                        const IntArray& out_size,
+                        MetaTensor* out,
+                        MetaTensor* dst_count);
+
 void SpectralNormInferMeta(const MetaTensor& weight,
                            const MetaTensor& u,
                            const MetaTensor& v,
paddle/phi/infermeta/unary.cc
(This diff is collapsed in the original page view and is not shown here.)
paddle/phi/infermeta/unary.h
@@ -65,6 +65,11 @@ void BatchSizeLikeInferMeta(const MetaTensor& x,
 void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);

+void ChannelShuffleInferMeta(const MetaTensor& x,
+                             int groups,
+                             const std::string& data_format,
+                             MetaTensor* out);
+
 void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);

 void ClassCenterSampleInferMeta(const MetaTensor& label,
@@ -191,6 +196,14 @@ void FlipInferMeta(const MetaTensor& x,
                    const std::vector<int>& axis,
                    MetaTensor* out);

+void FoldInferMeta(const MetaTensor& x,
+                   const std::vector<int>& output_sizes,
+                   const std::vector<int>& kernel_sizes,
+                   const std::vector<int>& strides,
+                   const std::vector<int>& paddings,
+                   const std::vector<int>& dilations,
+                   MetaTensor* out);
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
@@ -214,6 +227,8 @@ void GumbelSoftmaxInferMeta(const MetaTensor& x,
 void HistogramInferMeta(
     const MetaTensor& input, int64_t bins, int min, int max, MetaTensor* out);

+void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
+
 void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);

 void InferMetaFromVecValue(const MetaTensor& x,
@@ -288,6 +303,8 @@ void NanmedianInferMeta(const MetaTensor& x,
                         MetaTensor* out,
                         MetaTensor* median_index);

+void NonZeroInferMeta(const MetaTensor& condition, MetaTensor* out);
+
 void NMSInferMeta(const MetaTensor& x, float threshold, MetaTensor* out);

 void NormInferMeta(const MetaTensor& x,
@@ -297,6 +314,14 @@ void NormInferMeta(const MetaTensor& x,
                    MetaTensor* out,
                    MetaTensor* norm);

+void OneHotRawInferMeta(const MetaTensor& x,
+                        const Scalar& depth,
+                        DataType dtype,
+                        bool allow_out_of_range,
+                        MetaTensor* out);
+
+void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
+
 void OverlapAddInferMeta(const MetaTensor& x,
                          int hop_length,
                          int axis,
@@ -576,7 +601,7 @@ void TransposeGradInferMeta(const MetaTensor& x,
                             const std::vector<int>& axis,
                             MetaTensor* out);

-void TrilTriuInferMeta(const MetaTensor& x,
+void TrilInferMeta(const MetaTensor& x,
                        int diagonal,
                        bool lower,
                        MetaTensor* out);
@@ -657,29 +682,4 @@ void UnStackInferMeta(const MetaTensor& x,
                       int num,
                       std::vector<MetaTensor*> outs);

-void OneHotRawInferMeta(const MetaTensor& x,
-                        const Scalar& depth,
-                        DataType dtype,
-                        bool allow_out_of_range,
-                        MetaTensor* out);
-
-void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
-
-void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);
-
-void ChannelShuffleInferMeta(const MetaTensor& x,
-                             int groups,
-                             const std::string& data_format,
-                             MetaTensor* out);
-
-void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
-
-void FoldInferMeta(const MetaTensor& x,
-                   const std::vector<int>& output_sizes,
-                   const std::vector<int>& kernel_sizes,
-                   const std::vector<int>& strides,
-                   const std::vector<int>& paddings,
-                   const std::vector<int>& dilations,
-                   MetaTensor* out);
-
 }  // namespace phi
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc (deleted, mode 100644 → 0)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& w,
                                   const DenseTensor& label,
                                   const paddle::optional<DenseTensor>& path,
                                   const paddle::optional<DenseTensor>& code,
                                   const paddle::optional<DenseTensor>& bias,
                                   const DenseTensor& pre_out,
                                   const DenseTensor& out_grad,
                                   int num_classes,
                                   bool remote_prefetch,
                                   int trainer_id,
                                   const std::vector<int64_t>& height_sections,
                                   const std::vector<std::string>& epmap,
                                   const std::vector<std::string>& table_names,
                                   bool is_sparse,
                                   DenseTensor* x_grad,
                                   DenseTensor* w_grad,
                                   DenseTensor* bias_grad) {
  HierarchicalSigmoidGradKernelImpl<T>(ctx,
                                       x,
                                       w,
                                       label,
                                       path,
                                       code,
                                       bias,
                                       pre_out,
                                       out_grad,
                                       num_classes,
                                       remote_prefetch,
                                       trainer_id,
                                       height_sections,
                                       epmap,
                                       table_names,
                                       is_sparse,
                                       x_grad,
                                       w_grad,
                                       bias_grad);
}

}  // namespace phi

PD_REGISTER_KERNEL(hierarchical_sigmoid_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::HierarchicalSigmoidGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h → paddle/phi/kernels/cpu/hsigmoid_loss_grad.h
@@ -26,8 +26,7 @@ namespace phi {
 namespace math = paddle::operators::math;

 template <typename T, typename Context>
-void HierarchicalSigmoidGradKernelImpl(
-    const Context& ctx,
+void HSigmoidLossGradKernelImpl(const Context& ctx,
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
paddle/phi/kernels/cpu/hsigmoid_loss_grad_kernel.cc (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/hsigmoid_loss_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"

namespace phi {

template <typename T, typename Context>
void HSigmoidLossGradKernel(const Context& ctx,
                            const DenseTensor& x,
                            const DenseTensor& w,
                            const DenseTensor& label,
                            const paddle::optional<DenseTensor>& path,
                            const paddle::optional<DenseTensor>& code,
                            const paddle::optional<DenseTensor>& bias,
                            const DenseTensor& pre_out,
                            const DenseTensor& out_grad,
                            int num_classes,
                            bool remote_prefetch,
                            int trainer_id,
                            const std::vector<int64_t>& height_sections,
                            const std::vector<std::string>& epmap,
                            const std::vector<std::string>& table_names,
                            bool is_sparse,
                            DenseTensor* x_grad,
                            DenseTensor* w_grad,
                            DenseTensor* bias_grad) {
  HSigmoidLossGradKernelImpl<T>(ctx,
                                x,
                                w,
                                label,
                                path,
                                code,
                                bias,
                                pre_out,
                                out_grad,
                                num_classes,
                                remote_prefetch,
                                trainer_id,
                                height_sections,
                                epmap,
                                table_names,
                                is_sparse,
                                x_grad,
                                w_grad,
                                bias_grad);
}

}  // namespace phi

PD_REGISTER_KERNEL(hsigmoid_loss_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::HSigmoidLossGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc → paddle/phi/kernels/cpu/hsigmoid_loss_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/hierarchical_sigmoid_kernel.h"
+#include "paddle/phi/kernels/hsigmoid_loss_kernel.h"

 #include "paddle/fluid/operators/math/matrix_bit_code.h"
 #include "paddle/fluid/platform/transform.h"
@@ -28,7 +28,7 @@ namespace phi {
 namespace math = paddle::operators::math;

 template <typename T, typename Context>
-void HierarchicalSigmoidKernel(const Context& ctx,
+void HSigmoidLossKernel(const Context& ctx,
                         const DenseTensor& x,
                         const DenseTensor& w,
                         const DenseTensor& label,
@@ -106,9 +106,5 @@ void HierarchicalSigmoidKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(hierarchical_sigmoid,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::HierarchicalSigmoidKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    hsigmoid_loss, CPU, ALL_LAYOUT, phi::HSigmoidLossKernel, float, double) {}
paddle/phi/kernels/cpu/where_index_kernel.cc → paddle/phi/kernels/cpu/nonzero_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -47,7 +47,7 @@ struct WhereIndexFunctor {
 };

 template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                       const DenseTensor& condition,
                       DenseTensor* out) {
   const T* cond_data = condition.data<T>();
@@ -83,10 +83,10 @@ void WhereIndexKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(where_index,
+PD_REGISTER_KERNEL(nonzero,
                    CPU,
                    ALL_LAYOUT,
-                   phi::WhereIndexKernel,
+                   phi::NonZeroKernel,
                    int64_t,
                    int,
                    int16_t,
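The file list shows paddle/phi/ops/compat/where_index_sig.cc gaining 27 lines, though its diff is not expanded on this page. Its role is to keep the legacy where_index operator working against the renamed kernel. A plausible sketch following phi's usual compat-signature pattern (an assumption, not the committed file):

#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

// Map the legacy fluid op onto the renamed phi kernel: same input/output
// slots, new kernel name.
KernelSignature WhereIndexOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("nonzero", {"Condition"}, {}, {"Out"});
}

}  // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(where_index, nonzero);
PD_REGISTER_ARG_MAPPING_FN(where_index, phi::WhereIndexOpArgumentMapping);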
paddle/phi/kernels/cpu/reduce_prod_grad_kernel.cc → paddle/phi/kernels/cpu/prod_grad_kernel.cc
@@ -12,16 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"

 PD_REGISTER_KERNEL(prod_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ReduceProdGradKernel,
+                   phi::ProdGradKernel,
                    float,
                    double,
                    int,
paddle/phi/kernels/cpu/reduce_prod_kernel.cc → paddle/phi/kernels/cpu/prod_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
paddle/phi/kernels/cpu/tril_triu_kernel.cc → paddle/phi/kernels/cpu/tril_grad_kernel.cc
@@ -14,12 +14,12 @@
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu,
+PD_REGISTER_KERNEL(tril_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::TrilTriuKernel,
+                   phi::TrilGradKernel,
                    bool,
                    float,
                    double,
paddle/phi/kernels/cpu/tril_triu_grad_kernel.cc → paddle/phi/kernels/cpu/tril_kernel.cc
@@ -14,12 +14,12 @@
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu_grad,
+PD_REGISTER_KERNEL(tril,
                    CPU,
                    ALL_LAYOUT,
-                   phi::TrilTriuGradKernel,
+                   phi::TrilKernel,
                    bool,
                    float,
                    double,
paddle/phi/kernels/cpu/uniform_random_inplace_grad_kernel.cc → paddle/phi/kernels/cpu/uniform_inplace_grad_kernel.cc
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"

 #include "paddle/phi/core/kernel_registry.h"

 namespace phi {

 template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
+void UniformInplaceGradKernel(const Context& ctx,
                               const DenseTensor& out_grad,
                               float min,
                               float max,
@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace_grad,
+PD_REGISTER_KERNEL(uniform_inplace_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomInplaceGradKernel,
+                   phi::UniformInplaceGradKernel,
                    float,
                    double) {}
paddle/phi/kernels/cpu/uniform_random_inplace_kernel.cc → paddle/phi/kernels/cpu/uniform_inplace_kernel.cc
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_kernel.h"

 #include "paddle/phi/core/kernel_registry.h"

 namespace phi {

 template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
+void UniformInplaceKernel(const Context& ctx,
                           const DenseTensor& x,
                           float min,
                           float max,
@@ -46,9 +46,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace,
+PD_REGISTER_KERNEL(uniform_inplace,
                    CPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomInplaceKernel,
+                   phi::UniformInplaceKernel,
                    float,
                    double) {}
paddle/phi/kernels/cpu/uniform_random_kernel.cc → paddle/phi/kernels/cpu/uniform_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/uniform_real_distribution.h"
@@ -20,7 +20,7 @@
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                       const IntArray& shape,
                       DataType dtype,
                       const Scalar& min,
@@ -63,10 +63,10 @@ void UniformRandomRawKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw,
+PD_REGISTER_KERNEL(uniform_raw,
                    CPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomRawKernel,
+                   phi::UniformRawKernel,
                    float,
                    double,
                    phi::dtype::bfloat16) {}
paddle/phi/kernels/cpu/yolov3_loss_functor.h → paddle/phi/kernels/cpu/yolo_loss_functor.h
(file moved, no content changes)
paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc → paddle/phi/kernels/cpu/yolo_loss_grad_kernel.cc
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/yolov3_loss_grad_kernel.h"
+#include "paddle/phi/kernels/yolo_loss_grad_kernel.h"

 #include <algorithm>
 #include <vector>

 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h"
+#include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

 namespace phi {
@@ -117,7 +117,7 @@ static inline void CalcObjnessLossGrad(T* input_grad,
 }

 template <typename T, typename Context>
-void Yolov3LossGradKernel(const Context& dev_ctx,
+void YoloLossGradKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& gt_box,
                     const DenseTensor& gt_label,
@@ -237,9 +237,5 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(yolov3_loss_grad,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::Yolov3LossGradKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    yolo_loss_grad, CPU, ALL_LAYOUT, phi::YoloLossGradKernel, float, double) {}
paddle/phi/kernels/cpu/yolov3_loss_kernel.cc → paddle/phi/kernels/cpu/yolo_loss_kernel.cc
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/yolov3_loss_kernel.h"
+#include "paddle/phi/kernels/yolo_loss_kernel.h"

 #include <algorithm>
 #include <vector>

 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h"
+#include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

 namespace phi {
@@ -178,7 +178,7 @@ static void inline GtValid(bool* valid,
 }

 template <typename T, typename Context>
-void Yolov3LossKernel(const Context& dev_ctx,
+void YoloLossKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 const DenseTensor& gt_box,
                 const DenseTensor& gt_label,
@@ -371,4 +371,4 @@ void Yolov3LossKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    yolov3_loss, CPU, ALL_LAYOUT, phi::Yolov3LossKernel, float, double) {}
+    yolo_loss, CPU, ALL_LAYOUT, phi::YoloLossKernel, float, double) {}
paddle/phi/kernels/gpu/lstsq_kernel.cu
@@ -23,7 +23,7 @@
 #include "paddle/phi/kernels/funcs/slice.h"
 #include "paddle/phi/kernels/impl/lstsq_kernel_impl.h"
 #include "paddle/phi/kernels/impl/qr_kernel_impl.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"
 #include "paddle/phi/kernels/lstsq_kernel.h"
 #include "paddle/phi/kernels/matmul_kernel.h"
 #include "paddle/phi/kernels/transpose_kernel.h"
@@ -110,7 +110,7 @@ void LstsqKernel(const Context& dev_ctx,
   DenseTensor* res_r = new DenseTensor();
   res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
   dev_ctx.template Alloc<T>(res_r);
-  phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r);
+  phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);

   DenseTensor trans_y = phi::TransposeLast2Dim<T>(dev_ctx, tmp_y);
   DenseTensor slice_y =
@@ -135,7 +135,7 @@ void LstsqKernel(const Context& dev_ctx,
   DenseTensor* res_r = new DenseTensor();
   res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
   dev_ctx.template Alloc<T>(res_r);
-  phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r);
+  phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);

   phi::TriangularSolveKernel<T, Context>(
       dev_ctx, *res_r, *new_y, true, true, false, solution);
paddle/phi/kernels/gpu/where_index_kernel.cu → paddle/phi/kernels/gpu/nonzero_kernel.cu
@@ -25,7 +25,7 @@ namespace cub = hipcub;
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/select_impl.cu.h"
-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

 namespace phi {
 template <typename MaskT, typename IndexT, typename OutT>
@@ -62,7 +62,7 @@ struct IndexFunctor {
 };

 template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                       const DenseTensor& condition,
                       DenseTensor* out) {
   DenseTensor in_data;
@@ -74,10 +74,10 @@ void WhereIndexKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(where_index,
+PD_REGISTER_KERNEL(nonzero,
                    GPU,
                    ALL_LAYOUT,
-                   phi::WhereIndexKernel,
+                   phi::NonZeroKernel,
                    int64_t,
                    int,
                    int16_t,
paddle/phi/kernels/gpu/reduce_prod_grad_kernel.cu → paddle/phi/kernels/gpu/prod_grad_kernel.cu
@@ -12,16 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"

 PD_REGISTER_KERNEL(prod_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::ReduceProdGradKernel,
+                   phi::ProdGradKernel,
                    float,
                    double,
                    int,
paddle/phi/kernels/gpu/qr_kernel.cu
@@ -31,7 +31,7 @@
 #include "paddle/phi/kernels/qr_kernel.h"
 #include "paddle/phi/kernels/slice_kernel.h"
 #include "paddle/phi/kernels/transpose_kernel.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

 namespace phi {
@@ -103,12 +103,12 @@ void QrKernel(const Context& ctx,
       auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
       auto sliced_qr = SliceKernel<T, Context>(
           ctx, trans_qr, {trans_qr.dims().size() - 2}, {0}, {min_mn}, {1}, {});
-      auto tmp_r = TrilTriu<T, Context>(ctx, sliced_qr, 0, false);
+      auto tmp_r = Tril<T, Context>(ctx, sliced_qr, 0, false);
       // Transpose 'tmp_r' to retore the original row-major order
       phi::Copy(ctx, tmp_r, r->place(), false, r);
     } else {
       auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
-      auto tmp_r = TrilTriu<T, Context>(ctx, trans_qr, 0, false);
+      auto tmp_r = Tril<T, Context>(ctx, trans_qr, 0, false);
       // Transpose 'tmp_r' to retore the original row-major order
       phi::Copy(ctx, tmp_r, r->place(), false, r);
     }
paddle/phi/kernels/gpu/tril_triu_kernel.cu → paddle/phi/kernels/gpu/tril_grad_kernel.cu
@@ -14,12 +14,12 @@
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu,
+PD_REGISTER_KERNEL(tril_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::TrilTriuKernel,
+                   phi::TrilGradKernel,
                    bool,
                    float,
                    double,
paddle/phi/kernels/gpu/tril_triu_grad_kernel.cu → paddle/phi/kernels/gpu/tril_kernel.cu
@@ -14,12 +14,12 @@
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu_grad,
+PD_REGISTER_KERNEL(tril,
                    GPU,
                    ALL_LAYOUT,
-                   phi::TrilTriuGradKernel,
+                   phi::TrilKernel,
                    bool,
                    float,
                    double,
paddle/phi/kernels/gpu/uniform_random_inplace_grad_kernel.cu → paddle/phi/kernels/gpu/uniform_inplace_grad_kernel.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"

 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/full_kernel.h"
@@ -20,7 +20,7 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
+void UniformInplaceGradKernel(const Context& ctx,
                               const DenseTensor& out_grad,
                               float min,
                               float max,
@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace_grad,
+PD_REGISTER_KERNEL(uniform_inplace_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomInplaceGradKernel,
+                   phi::UniformInplaceGradKernel,
                    float,
                    double) {}
paddle/phi/kernels/gpu/uniform_random_inplace_kernel.cu → paddle/phi/kernels/gpu/uniform_inplace_kernel.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_inplace_kernel.h"
+#include "paddle/phi/kernels/uniform_inplace_kernel.h"

 #include <thrust/random.h>
@@ -54,7 +54,7 @@ struct UniformGenerator {
 };

 template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
+void UniformInplaceKernel(const Context& ctx,
                           const DenseTensor& x,
                           float min,
                           float max,
@@ -80,9 +80,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_inplace,
+PD_REGISTER_KERNEL(uniform_inplace,
                    GPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomInplaceKernel,
+                   phi::UniformInplaceKernel,
                    float,
                    double) {}
paddle/phi/kernels/gpu/uniform_random_kernel.cu → paddle/phi/kernels/gpu/uniform_kernel.cu
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include <thrust/random.h>
@@ -54,7 +54,7 @@ struct UniformGenerator {
 };

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                       const IntArray& shape,
                       DataType dtype,
                       const Scalar& min,
@@ -86,10 +86,10 @@ void UniformRandomRawKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw,
+PD_REGISTER_KERNEL(uniform_raw,
                    GPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomRawKernel,
+                   phi::UniformRawKernel,
                    float,
                    double,
                    phi::dtype::float16) {}
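In every kernel file above, the first argument to PD_REGISTER_KERNEL is the registry key, and it is the same string that PD_DECLARE_KERNEL and the YAML `func :` fields refer to; the rename only works because this commit updates every producer and consumer of that key together. A sketch of the lookup relationship (illustrative use of phi::KernelFactory with an assumed API; not code from this commit):

#include "paddle/phi/core/kernel_factory.h"

// After this commit the GPU kernel is registered under "uniform_raw", so the
// factory resolves the new key and no longer knows "uniform_random_raw".
bool UniformRawIsRegistered() {
  return !phi::KernelFactory::Instance().SelectKernelMap("uniform_raw").empty();
}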
paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h (deleted, mode 100644 → 0)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& w,
                                   const DenseTensor& label,
                                   const paddle::optional<DenseTensor>& path,
                                   const paddle::optional<DenseTensor>& code,
                                   const paddle::optional<DenseTensor>& bias,
                                   const DenseTensor& pre_out,
                                   const DenseTensor& out_grad,
                                   int num_classes,
                                   bool remote_prefetch,
                                   int trainer_id,
                                   const std::vector<int64_t>& height_sections,
                                   const std::vector<std::string>& epmap,
                                   const std::vector<std::string>& table_names,
                                   bool is_sparse,
                                   DenseTensor* x_grad,
                                   DenseTensor* w_grad,
                                   DenseTensor* bias_grad);

}  // namespace phi
paddle/phi/kernels/hierarchical_sigmoid_kernel.h (deleted, mode 100644 → 0)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HierarchicalSigmoidKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& w,
                               const DenseTensor& label,
                               const paddle::optional<DenseTensor>& path,
                               const paddle::optional<DenseTensor>& code,
                               const paddle::optional<DenseTensor>& bias,
                               int num_classes,
                               bool remote_prefetch,
                               int trainer_id,
                               const std::vector<int64_t>& height_sections,
                               const std::vector<std::string>& epmap,
                               const std::vector<std::string>& table_names,
                               bool is_sparse,
                               DenseTensor* out,
                               DenseTensor* pre_out,
                               DenseTensor* w_out);

}  // namespace phi
paddle/phi/kernels/yolov3_loss_grad_kernel.h → paddle/phi/kernels/hsigmoid_loss_grad_kernel.h
@@ -19,24 +19,24 @@
 namespace phi {

 template <typename T, typename Context>
-void Yolov3LossGradKernel(const Context& dev_ctx,
-                          const DenseTensor& x,
-                          const DenseTensor& gt_box,
-                          const DenseTensor& gt_label,
-                          const paddle::optional<DenseTensor>& gt_score,
-                          const DenseTensor& objectness_mask,
-                          const DenseTensor& gt_match_mask,
-                          const DenseTensor& loss_grad,
-                          const std::vector<int>& anchors,
-                          const std::vector<int>& anchor_mask,
-                          int class_num,
-                          float ignore_thresh,
-                          int downsample_ratio,
-                          bool use_label_smooth,
-                          float scale_x_y,
-                          DenseTensor* x_grad,
-                          DenseTensor* gt_box_grad,
-                          DenseTensor* gt_label_grad,
-                          DenseTensor* gt_score_grad);
+void HSigmoidLossGradKernel(const Context& ctx,
+                            const DenseTensor& x,
+                            const DenseTensor& w,
+                            const DenseTensor& label,
+                            const paddle::optional<DenseTensor>& path,
+                            const paddle::optional<DenseTensor>& code,
+                            const paddle::optional<DenseTensor>& bias,
+                            const DenseTensor& pre_out,
+                            const DenseTensor& out_grad,
+                            int num_classes,
+                            bool remote_prefetch,
+                            int trainer_id,
+                            const std::vector<int64_t>& height_sections,
+                            const std::vector<std::string>& epmap,
+                            const std::vector<std::string>& table_names,
+                            bool is_sparse,
+                            DenseTensor* x_grad,
+                            DenseTensor* w_grad,
+                            DenseTensor* bias_grad);

 }  // namespace phi
paddle/phi/kernels/hsigmoid_loss_kernel.h (new file, 0 → 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void HSigmoidLossKernel(const Context& ctx,
                        const DenseTensor& x,
                        const DenseTensor& w,
                        const DenseTensor& label,
                        const paddle::optional<DenseTensor>& path,
                        const paddle::optional<DenseTensor>& code,
                        const paddle::optional<DenseTensor>& bias,
                        int num_classes,
                        bool remote_prefetch,
                        int trainer_id,
                        const std::vector<int64_t>& height_sections,
                        const std::vector<std::string>& epmap,
                        const std::vector<std::string>& table_names,
                        bool is_sparse,
                        DenseTensor* out,
                        DenseTensor* pre_out,
                        DenseTensor* w_out);

}  // namespace phi
paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h → paddle/phi/kernels/impl/prod_grad_kernel_impl.h
@@ -17,12 +17,12 @@
 #include "paddle/phi/common/int_array.h"
 #include "paddle/phi/kernels/funcs/reduce_functor.h"
 #include "paddle/phi/kernels/impl/reduce_grad.h"
-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

 namespace phi {

 template <typename T, typename Context>
-void ReduceProdGradKernel(const Context& dev_ctx,
+void ProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& out,
                           const DenseTensor& out_grad,
paddle/phi/kernels/impl/qr_grad_kernel_impl.h
@@ -29,7 +29,7 @@
 #include "paddle/phi/kernels/slice_kernel.h"
 #include "paddle/phi/kernels/transpose_kernel.h"
 #include "paddle/phi/kernels/triangular_solve_kernel.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

 namespace phi {
@@ -116,8 +116,8 @@ void QrGradKernel(const Context& ctx,
   DenseTensor M_tmp1 = Subtract<T, Context>(ctx, R_term, Q_term);

   // Compute M = (tril(M) + tril(M).mH()) * 0.5 Identity
-  DenseTensor M_tril_0 = TrilTriu<T, Context>(ctx, M_tmp1, 0, true);
-  DenseTensor M_tril_1 = TrilTriu<T, Context>(ctx, M_tmp1, -1, true);
+  DenseTensor M_tril_0 = Tril<T, Context>(ctx, M_tmp1, 0, true);
+  DenseTensor M_tril_1 = Tril<T, Context>(ctx, M_tmp1, -1, true);
   DenseTensor M = Add<T, Context>(
       ctx, M_tril_0, TransposeLast2Dim<T, Context>(ctx, M_tril_1));
paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h → paddle/phi/kernels/impl/tril_grad_kernel_impl.h
@@ -16,12 +16,12 @@
 #include "paddle/phi/kernels/funcs/for_range.h"
 #include "paddle/phi/kernels/funcs/tril_triu_compute.h"
-#include "paddle/phi/kernels/tril_triu_grad_kernel.h"
+#include "paddle/phi/kernels/tril_grad_kernel.h"

 namespace phi {

 template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
+void TrilGradKernel(const Context& ctx,
                         const DenseTensor& out_grad,
                         int diagonal,
                         bool lower,
paddle/phi/kernels/impl/tril_triu_kernel_impl.h → paddle/phi/kernels/impl/tril_kernel_impl.h
@@ -16,12 +16,12 @@
 #include "paddle/phi/kernels/funcs/for_range.h"
 #include "paddle/phi/kernels/funcs/tril_triu_compute.h"
-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

 namespace phi {

 template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
+void TrilKernel(const Context& ctx,
                     const DenseTensor& x,
                     int diagonal,
                     bool lower,
paddle/phi/kernels/kps/reduce_prod_kernel.cu → paddle/phi/kernels/kps/prod_kernel.cu
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/gpu/reduce.h"
paddle/phi/kernels/where_index_kernel.h → paddle/phi/kernels/nonzero_kernel.h
@@ -19,7 +19,7 @@
 namespace phi {

 template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                       const DenseTensor& condition,
                       DenseTensor* out);
paddle/phi/kernels/reduce_prod_grad_kernel.h → paddle/phi/kernels/prod_grad_kernel.h
@@ -20,7 +20,7 @@
 namespace phi {

 template <typename T, typename Context>
-void ReduceProdGradKernel(const Context& dev_ctx,
+void ProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& out,
                           const DenseTensor& out_grad,
paddle/phi/kernels/reduce_prod_kernel.cc → paddle/phi/kernels/prod_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/core/kernel_registry.h"
paddle/phi/kernels/reduce_prod_kernel.h → paddle/phi/kernels/prod_kernel.h (file moved)
paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc → paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.cc
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h"
+#include "paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h"

 #include "paddle/fluid/framework/mixed_vector.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h"
+#include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"

 namespace phi {
 namespace sr {
@@ -36,7 +36,7 @@ static std::vector<int64_t> PathToRows(const DenseTensor& path) {
 }

 template <typename T, typename Context>
-void HierarchicalSigmoidGradKernel(const Context& ctx,
+void HSigmoidLossGradKernel(const Context& ctx,
                             const DenseTensor& x,
                             const DenseTensor& w,
                             const DenseTensor& label,
@@ -66,7 +66,7 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
   phi::DDim temp_dim(w.dims());
   temp_dim[0] = real_rows.size();
   w_grad_value->Resize(temp_dim);
-  phi::HierarchicalSigmoidGradKernelImpl<T>(ctx,
-                                            x,
-                                            w,
-                                            label,
+  phi::HSigmoidLossGradKernelImpl<T>(ctx,
+                                     x,
+                                     w,
+                                     label,
@@ -91,9 +91,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
 }  // namespace sr
 }  // namespace phi

-PD_REGISTER_KERNEL(hierarchical_sigmoid_grad_sr,
+PD_REGISTER_KERNEL(hsigmoid_loss_grad_sr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sr::HierarchicalSigmoidGradKernel,
+                   phi::sr::HSigmoidLossGradKernel,
                    float,
                    double) {}
paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h → paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h
@@ -21,7 +21,7 @@ namespace phi {
 namespace sr {

 template <typename T, typename Context>
-void HierarchicalSigmoidGradKernel(const Context& ctx,
+void HSigmoidLossGradKernel(const Context& ctx,
                             const DenseTensor& x,
                             const DenseTensor& w,
                             const DenseTensor& label,
paddle/phi/kernels/selected_rows/uniform_random_kernel.cc → paddle/phi/kernels/selected_rows/uniform_kernel.cc
@@ -12,18 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/selected_rows/uniform_random_kernel.h"
+#include "paddle/phi/kernels/selected_rows/uniform_kernel.h"

 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 namespace phi {
 namespace sr {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                             const IntArray& shape,
                             DataType dtype,
                             const Scalar& min,
@@ -33,7 +33,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
                             int diag_step,
                             float diag_val,
                             SelectedRows* out) {
-  phi::UniformRandomRawKernel<T>(dev_ctx,
-                                 shape,
-                                 dtype,
-                                 min,
+  phi::UniformRawKernel<T>(dev_ctx,
+                           shape,
+                           dtype,
+                           min,
@@ -46,61 +46,51 @@ void UniformRandomRawKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                          const IntArray& shape,
                          DataType dtype,
                          const Scalar& min,
                          const Scalar& max,
                          int seed,
                          SelectedRows* out) {
-  phi::UniformRandomKernel<T>(
+  phi::UniformKernel<T>(
       dev_ctx, shape, dtype, min, max, seed, out->mutable_value());
 }

 }  // namespace sr
 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random_raw_sr,
+PD_REGISTER_KERNEL(uniform_raw_sr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
+                   phi::sr::UniformRawKernel,
                    float,
                    double,
                    phi::dtype::bfloat16) {}

-PD_REGISTER_KERNEL(uniform_random_sr,
+PD_REGISTER_KERNEL(uniform_sr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
+                   phi::sr::UniformKernel,
                    float,
                    double,
                    phi::dtype::bfloat16) {}

 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(uniform_random_raw_sr,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
-                   float,
-                   double) {}
-PD_REGISTER_KERNEL(uniform_random_sr,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(uniform_raw_sr,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::sr::UniformRawKernel,
+                   float,
+                   double) {}
+PD_REGISTER_KERNEL(uniform_sr,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::sr::UniformKernel,
+                   float,
+                   double) {}
 #endif

 #if defined(PADDLE_WITH_XPU)
-PD_REGISTER_KERNEL(uniform_random_raw_sr,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomRawKernel,
-                   float) {}
-PD_REGISTER_KERNEL(uniform_random_sr,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::sr::UniformRandomKernel,
-                   float) {}
+PD_REGISTER_KERNEL(uniform_raw_sr,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::sr::UniformRawKernel,
+                   float) {}
+PD_REGISTER_KERNEL(uniform_sr,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::sr::UniformKernel,
+                   float) {}
 #endif
paddle/phi/kernels/selected_rows/uniform_random_kernel.h → paddle/phi/kernels/selected_rows/uniform_kernel.h
@@ -22,7 +22,7 @@ namespace phi {
 namespace sr {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                             const IntArray& shape,
                             DataType dtype,
                             const Scalar& min,

@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
                             SelectedRows* out);

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                          const IntArray& shape,
                          DataType dtype,
                          const Scalar& min,
paddle/phi/kernels/tril_triu_grad_kernel.h → paddle/phi/kernels/tril_grad_kernel.h
@@ -19,7 +19,7 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
+void TrilGradKernel(const Context& ctx,
                         const DenseTensor& out_grad,
                         int diagonal,
                         bool lower,
paddle/phi/kernels/tril_triu_kernel.h → paddle/phi/kernels/tril_kernel.h
@@ -20,21 +20,21 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
+void TrilKernel(const Context& ctx,
                     const DenseTensor& x,
                     int diagonal,
                     bool lower,
                     DenseTensor* out);

 template <typename T, typename Context>
-DenseTensor TrilTriu(const Context& ctx,
+DenseTensor Tril(const Context& ctx,
                      const DenseTensor& x,
                      int diagonal,
                      bool lower) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
-  TrilTriuInferMeta(x, diagonal, lower, &meta_out);
-  TrilTriuKernel<T, Context>(ctx, x, diagonal, lower, &dense_out);
+  TrilInferMeta(x, diagonal, lower, &meta_out);
+  TrilKernel<T, Context>(ctx, x, diagonal, lower, &dense_out);
   return dense_out;
 }
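The functional Tril<T, Context> wrapper above bundles the shape inference (TrilInferMeta) and the kernel launch so other kernels can take a triangular part as a value; the QR gradient in qr_grad_kernel_impl.h uses it exactly this way. A hedged sketch of typical call sites (the tensor m is illustrative):

    // Lower triangle including the main diagonal: diagonal = 0, lower = true.
    DenseTensor lower = phi::Tril<T, Context>(ctx, m, 0, true);
    // Strictly lower triangle: diagonal = -1 drops the main diagonal.
    DenseTensor strict_lower = phi::Tril<T, Context>(ctx, m, -1, true);
    // lower = false keeps the upper triangle instead, covering the "triu"
    // half of the retired tril_triu name.
    DenseTensor upper = phi::Tril<T, Context>(ctx, m, 0, false);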
paddle/phi/kernels/uniform_random_inplace_kernel.h → paddle/phi/kernels/uniform_inplace_grad_kernel.h
@@ -19,14 +19,14 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomInplaceKernel(const Context& ctx,
-                                const DenseTensor& x,
+void UniformInplaceGradKernel(const Context& ctx,
+                              const DenseTensor& out_grad,
                               float min,
                               float max,
                               int seed,
                               int diag_num,
                               int diag_step,
                               float diag_val,
-                              DenseTensor* out);
+                              DenseTensor* x_grad);

 }  // namespace phi
paddle/phi/kernels/uniform_random_inplace_grad_kernel.h → paddle/phi/kernels/uniform_inplace_kernel.h
@@ -19,14 +19,14 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomInplaceGradKernel(const Context& ctx,
-                                    const DenseTensor& out_grad,
+void UniformInplaceKernel(const Context& ctx,
+                          const DenseTensor& x,
                           float min,
                           float max,
                           int seed,
                           int diag_num,
                           int diag_step,
                           float diag_val,
-                          DenseTensor* x_grad);
+                          DenseTensor* out);

 }  // namespace phi
paddle/phi/kernels/uniform_random_kernel.cc → paddle/phi/kernels/uniform_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
@@ -29,38 +29,36 @@
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                          const IntArray& shape,
                          DataType dtype,
                          const Scalar& min,
                          const Scalar& max,
                          int seed,
                          DenseTensor* out) {
-  UniformRandomRawKernel<T>(
-      dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
+  UniformRawKernel<T>(dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
 }

 }  // namespace phi

-PD_REGISTER_KERNEL(uniform_random,
+PD_REGISTER_KERNEL(uniform,
                    CPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomKernel,
+                   phi::UniformKernel,
                    float,
                    double,
                    phi::dtype::bfloat16) {}

 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(uniform_random,
+PD_REGISTER_KERNEL(uniform,
                    GPU,
                    ALL_LAYOUT,
-                   phi::UniformRandomKernel,
+                   phi::UniformKernel,
                    float,
                    double,
                    phi::dtype::float16) {}
 #endif

 #ifdef PADDLE_WITH_XPU
-PD_REGISTER_KERNEL(uniform_random, XPU, ALL_LAYOUT, phi::UniformRandomKernel, float) {}
+PD_REGISTER_KERNEL(uniform, XPU, ALL_LAYOUT, phi::UniformKernel, float) {}
 #endif
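The Raw suffix marks the variant that still exposes the legacy diagonal attributes of the old uniform_random op (diag_num, diag_step, diag_val); UniformKernel above simply forwards with all three pinned to zero. A caller that needs the diagonal overwrite goes through the raw entry point directly; a hedged sketch with illustrative attribute values:

    // After the uniform fill, the legacy attributes overwrite a strided
    // diagonal pattern with diag_val; zeros (as UniformKernel passes) skip it.
    phi::UniformRawKernel<T>(dev_ctx, shape, dtype, min, max, seed,
                             /*diag_num=*/4, /*diag_step=*/1, /*diag_val=*/1.0f,
                             out);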
paddle/phi/kernels/uniform_random_kernel.h → paddle/phi/kernels/uniform_kernel.h
@@ -22,7 +22,7 @@
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                             const IntArray& shape,
                             DataType dtype,
                             const Scalar& min,

@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
                             DenseTensor* out);

 template <typename T, typename Context>
-void UniformRandomKernel(const Context& dev_ctx,
+void UniformKernel(const Context& dev_ctx,
                          const IntArray& shape,
                          DataType dtype,
                          const Scalar& min,
paddle/phi/kernels/xpu/where_index_kernel.cc → paddle/phi/kernels/xpu/nonzero_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/device/xpu/xpu_header.h"

@@ -22,7 +22,7 @@
 namespace phi {

 template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                       const DenseTensor& condition,
                       DenseTensor* out) {
   const T* cond_data = condition.data<T>();

@@ -69,4 +69,4 @@ void WhereIndexKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    where_index, XPU, ALL_LAYOUT, phi::WhereIndexKernel, int, bool, float) {}
+    nonzero, XPU, ALL_LAYOUT, phi::NonZeroKernel, int, bool, float) {}
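Only the names change here: NonZeroKernel keeps the where_index semantics, writing the coordinates of the non-zero elements of `condition` into `out`, one row of indices per hit. A hedged micro-example of that contract (values illustrative):

    // For a rank-2 condition [[0, 1], [2, 0]] there are two non-zeros, so
    // the kernel fills `out` as a 2 x 2 int64 tensor of coordinates:
    // [[0, 1], [1, 0]].
    phi::NonZeroKernel<T>(dev_ctx, condition, &out);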
paddle/phi/kernels/xpu/reduce_prod_kernel.cc → paddle/phi/kernels/xpu/prod_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
paddle/phi/kernels/xpu/tril_triu_grad_kernel.cc → paddle/phi/kernels/xpu/tril_grad_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/tril_triu_grad_kernel.h"
+#include "paddle/phi/kernels/tril_grad_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/core/kernel_registry.h"

@@ -20,7 +20,7 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuGradKernel(const Context& ctx,
+void TrilGradKernel(const Context& ctx,
                         const DenseTensor& out_grad,
                         int diagonal,
                         bool lower,

@@ -49,4 +49,4 @@ void TrilTriuGradKernel(const Context& ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    tril_triu_grad, XPU, ALL_LAYOUT, phi::TrilTriuGradKernel, int, float) {}
+    tril_grad, XPU, ALL_LAYOUT, phi::TrilGradKernel, int, float) {}
paddle/phi/kernels/xpu/tril_triu_kernel.cc → paddle/phi/kernels/xpu/tril_kernel.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/phi/kernels/tril_triu_kernel.h"
+#include "paddle/phi/kernels/tril_kernel.h"

 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/core/kernel_registry.h"

@@ -20,7 +20,7 @@
 namespace phi {

 template <typename T, typename Context>
-void TrilTriuKernel(const Context& ctx,
+void TrilKernel(const Context& ctx,
                     const DenseTensor& x,
                     int diagonal,
                     bool lower,

@@ -48,5 +48,4 @@ void TrilTriuKernel(const Context& ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    tril_triu, XPU, ALL_LAYOUT, phi::TrilTriuKernel, int, float) {}
+PD_REGISTER_KERNEL(tril, XPU, ALL_LAYOUT, phi::TrilKernel, int, float) {}
paddle/phi/kernels/xpu/uniform_random_kernel.cc → paddle/phi/kernels/xpu/uniform_kernel.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/uniform_random_kernel.h"
+#include "paddle/phi/kernels/uniform_kernel.h"

 #include <string>

@@ -24,7 +24,7 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void UniformRandomRawKernel(const Context& dev_ctx,
+void UniformRawKernel(const Context& dev_ctx,
                             const IntArray& shape,
                             DataType dtype,
                             const Scalar& min,

@@ -76,5 +76,5 @@ void UniformRandomRawKernel(const Context &dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(
-    uniform_random_raw, XPU, ALL_LAYOUT, phi::UniformRandomRawKernel, float) {
-}
+PD_REGISTER_KERNEL(uniform_raw, XPU, ALL_LAYOUT, phi::UniformRawKernel, float) {
+}
paddle/phi/kernels/yolo_loss_grad_kernel.h (new file, 0 → 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void YoloLossGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& gt_box,
                        const DenseTensor& gt_label,
                        const paddle::optional<DenseTensor>& gt_score,
                        const DenseTensor& objectness_mask,
                        const DenseTensor& gt_match_mask,
                        const DenseTensor& loss_grad,
                        const std::vector<int>& anchors,
                        const std::vector<int>& anchor_mask,
                        int class_num,
                        float ignore_thresh,
                        int downsample_ratio,
                        bool use_label_smooth,
                        float scale_x_y,
                        DenseTensor* x_grad,
                        DenseTensor* gt_box_grad,
                        DenseTensor* gt_label_grad,
                        DenseTensor* gt_score_grad);

}  // namespace phi
paddle/phi/kernels/yolo_loss_kernel.h (new file, 0 → 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void YoloLossKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& gt_box,
                    const DenseTensor& gt_label,
                    const paddle::optional<DenseTensor>& gt_score,
                    const std::vector<int>& anchors,
                    const std::vector<int>& anchor_mask,
                    int class_num,
                    float ignore_thresh,
                    int downsample_ratio,
                    bool use_label_smooth,
                    float scale_x_y,
                    DenseTensor* loss,
                    DenseTensor* objectness_mask,
                    DenseTensor* gt_match_mask);

}  // namespace phi
paddle/phi/kernels/yolov3_loss_kernel.h (deleted, 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void Yolov3LossKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& gt_box,
                      const DenseTensor& gt_label,
                      const paddle::optional<DenseTensor>& gt_score,
                      const std::vector<int>& anchors,
                      const std::vector<int>& anchor_mask,
                      int class_num,
                      float ignore_thresh,
                      int downsample_ratio,
                      bool use_label_smooth,
                      float scale_x_y,
                      DenseTensor* loss,
                      DenseTensor* objectness_mask,
                      DenseTensor* gt_match_mask);

}  // namespace phi
paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc
@@ -18,7 +18,7 @@ namespace phi {
 KernelSignature HierarchicalSigmoidOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("hierarchical_sigmoid",
+  return KernelSignature("hsigmoid_loss",
                          {"X", "W", "Label", "PathTable", "PathCode", "Bias"},
                          {"num_classes",
                           "remote_prefetch",

@@ -33,7 +33,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping(
 KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   if (ctx.IsDenseTensorOutput("W@GRAD")) {
-    return KernelSignature("hierarchical_sigmoid_grad",
+    return KernelSignature("hsigmoid_loss_grad",
                            {"X",
                             "W",
                             "Label",

@@ -51,7 +51,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
                             "is_sparse"},
                            {"X@GRAD", "W@GRAD", "Bias@GRAD"});
   } else if (ctx.IsSelectedRowsOutput("W@GRAD")) {
-    return KernelSignature("hierarchical_sigmoid_grad_sr",
+    return KernelSignature("hsigmoid_loss_grad_sr",
                            {"X",
                             "W",
                             "Label",

@@ -75,6 +75,9 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid, hsigmoid_loss);
+PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid_grad, hsigmoid_loss_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid,
                            phi::HierarchicalSigmoidOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid_grad,
                            phi::HierarchicalSigmoidGradOpArgumentMapping);
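These compat files are what keep existing programs and serialized models working across the rename: the KernelSignature maps the legacy operator's inputs, attributes, and outputs onto the new phi kernel name, while PD_REGISTER_BASE_KERNEL_NAME records the old-name → new-name alias so either spelling resolves. Every op touched by this patch follows the same pattern; a condensed, hedged sketch for a hypothetical rename old_op → new_kernel (all three names below are placeholders, not from the patch):

    #include "paddle/phi/core/compat/op_utils.h"

    namespace phi {
    // Route the legacy op's "X" input, "some_attr" attribute, and "Out"
    // output to the renamed kernel.
    KernelSignature OldOpArgumentMapping(const ArgumentMappingContext& ctx) {
      return KernelSignature("new_kernel", {"X"}, {"some_attr"}, {"Out"});
    }
    }  // namespace phi

    PD_REGISTER_BASE_KERNEL_NAME(old_op, new_kernel);
    PD_REGISTER_ARG_MAPPING_FN(old_op, phi::OldOpArgumentMapping);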
paddle/phi/ops/compat/tril_triu_sig.cc
@@ -17,16 +17,19 @@ limitations under the License. */
 namespace phi {

 KernelSignature TrilTriuOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "tril_triu", {"X"}, {"diagonal", "lower"}, {"Out"});
+  return KernelSignature("tril", {"X"}, {"diagonal", "lower"}, {"Out"});
 }

 KernelSignature TrilTriuGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "tril_triu_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
+      "tril_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
 }

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(tril_triu, tril);
+PD_REGISTER_BASE_KERNEL_NAME(tril_triu_grad, tril_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(tril_triu, phi::TrilTriuOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(tril_triu_grad,
                            phi::TrilTriuGradOpArgumentMapping);
paddle/phi/ops/compat/uniform_random_inplace_sig.cc
@@ -18,7 +18,7 @@ namespace phi {
 KernelSignature UniformRandomInplaceOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "uniform_random_inplace",
+      "uniform_inplace",
       {"X"},
       {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
       {"Out"});

@@ -27,7 +27,7 @@ KernelSignature UniformRandomInplaceOpArgumentMapping(
 KernelSignature UniformRandomInplaceGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "uniform_random_inplace_grad",
+      "uniform_inplace_grad",
       {"Out@GRAD"},
       {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
       {"X@GRAD"});

@@ -35,6 +35,8 @@ KernelSignature UniformRandomInplaceGradOpArgumentMapping(
 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(uniform_random_inplace, uniform_inplace);
+
 PD_REGISTER_ARG_MAPPING_FN(uniform_random_inplace,
                            phi::UniformRandomInplaceOpArgumentMapping);
paddle/phi/ops/compat/uniform_random_sig.cc
@@ -22,7 +22,7 @@ KernelSignature UniformRandomOpArgumentMapping(
   if (ctx.IsDenseTensorOutput("Out")) {
     if (diag_num) {
       if (ctx.InputSize("ShapeTensorList") > 0) {
-        return KernelSignature("uniform_random_raw",
+        return KernelSignature("uniform_raw",
                                {},
                                {"ShapeTensorList",
                                 "dtype",

@@ -37,7 +37,7 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-         return KernelSignature("uniform_random_raw",
+         return KernelSignature("uniform_raw",
                                 {},
                                 {"ShapeTensor",
                                  "dtype",

@@ -49,7 +49,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                  "diag_val"},
                                 {"Out"});
        } else {
-         return KernelSignature("uniform_random_raw",
+         return KernelSignature("uniform_raw",
                                 {},
                                 {"shape",
                                  "dtype",

@@ -65,7 +65,7 @@ KernelSignature UniformRandomOpArgumentMapping(
    } else {
      if (ctx.InputSize("ShapeTensorList") > 0) {
        return KernelSignature(
-           "uniform_random",
+           "uniform",
            {},
            {"ShapeTensorList", "dtype", "min", "max", "seed"},
            {"Out"});

@@ -73,22 +73,20 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-         return KernelSignature("uniform_random",
-                                {},
-                                {"ShapeTensor", "dtype", "min", "max", "seed"},
-                                {"Out"});
+         return KernelSignature("uniform",
+                                {},
+                                {"ShapeTensor", "dtype", "min", "max", "seed"},
+                                {"Out"});
        } else {
-         return KernelSignature("uniform_random",
-                                {},
-                                {"shape", "dtype", "min", "max", "seed"},
-                                {"Out"});
+         return KernelSignature("uniform",
+                                {},
+                                {"shape", "dtype", "min", "max", "seed"},
+                                {"Out"});
        }
      }
    }
  } else if (ctx.IsSelectedRowsOutput("Out")) {
    if (diag_num) {
      if (ctx.InputSize("ShapeTensorList") > 0) {
-       return KernelSignature("uniform_random_raw_sr",
+       return KernelSignature("uniform_raw_sr",
                               {},
                               {"ShapeTensorList",
                                "dtype",

@@ -103,7 +101,7 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-         return KernelSignature("uniform_random_raw_sr",
+         return KernelSignature("uniform_raw_sr",
                                 {},
                                 {"ShapeTensor",
                                  "dtype",

@@ -115,7 +113,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                  "diag_val"},
                                 {"Out"});
        } else {
-         return KernelSignature("uniform_random_raw_sr",
+         return KernelSignature("uniform_raw_sr",
                                 {},
                                 {"shape",
                                  "dtype",

@@ -131,7 +129,7 @@ KernelSignature UniformRandomOpArgumentMapping(
    } else {
      if (ctx.InputSize("ShapeTensorList") > 0) {
        return KernelSignature(
-           "uniform_random_sr",
+           "uniform_sr",
            {},
            {"ShapeTensorList", "dtype", "min", "max", "seed"},
            {"Out"});

@@ -139,12 +137,12 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-         return KernelSignature("uniform_random_sr",
+         return KernelSignature("uniform_sr",
                                 {},
                                 {"ShapeTensor", "dtype", "min", "max", "seed"},
                                 {"Out"});
        } else {
-         return KernelSignature("uniform_random_sr",
+         return KernelSignature("uniform_sr",
                                 {},
                                 {"shape", "dtype", "min", "max", "seed"},
                                 {"Out"});

@@ -156,4 +154,6 @@ KernelSignature UniformRandomOpArgumentMapping(
 }
 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(uniform_random, uniform);
+
 PD_REGISTER_ARG_MAPPING_FN(uniform_random, phi::UniformRandomOpArgumentMapping);
paddle/phi/ops/compat/where_index_sig.cc (new file, 0 → 100644)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace
phi
{
KernelSignature
WhereIndexOpArgumentMapping
(
const
ArgumentMappingContext
&
ctx
)
{
return
KernelSignature
(
"nonzero"
,
{
"Condition"
},
{},
{
"Out"
});
}
}
// namespace phi
PD_REGISTER_BASE_KERNEL_NAME
(
where_index
,
nonzero
);
PD_REGISTER_ARG_MAPPING_FN
(
where_index
,
phi
::
WhereIndexOpArgumentMapping
);
paddle/phi/ops/compat/yolov3_loss_sig.cc
@@ -17,7 +17,7 @@
 namespace phi {

 KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("yolov3_loss",
+  return KernelSignature("yolo_loss",
                          {"X", "GTBox", "GTLabel", "GTScore"},
                          {"anchors",
                           "anchor_mask",

@@ -32,7 +32,7 @@ KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
 KernelSignature Yolov3LossGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "yolov3_loss_grad",
+      "yolo_loss_grad",
       {"X",
        "GTBox",
        "GTLabel",

@@ -51,6 +51,9 @@ KernelSignature Yolov3LossGradOpArgumentMapping(
 }

 }  // namespace phi

+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss, yolo_loss);
+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss_grad, yolo_loss_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(yolov3_loss, phi::Yolov3LossOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(yolov3_loss_grad,
                            phi::Yolov3LossGradOpArgumentMapping);
python/paddle/fluid/initializer.py
@@ -309,7 +309,7 @@ class UniformInitializer(Initializer):
         if framework._non_static_mode():
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     var.shape,
                     out_dtype,
                     self._low,

@@ -711,7 +711,7 @@ class XavierInitializer(Initializer):
         if self._uniform:
             limit = math.sqrt(6.0 / float(fan_in + fan_out))
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     out_var.shape,
                     out_dtype,
                     -limit,

@@ -923,7 +923,7 @@ class MSRAInitializer(Initializer):
             gain = calculate_gain(self._nonlinearity, self._negative_slope)
             limit = gain * math.sqrt(3.0 / float(fan_in))
             if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                     var.shape,
                     out_dtype,
                     -limit,
python/paddle/fluid/layers/nn.py
@@ -5385,7 +5385,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
             )
         )
     if in_dygraph_mode():
-        return _C_ops.reduce_prod(
+        return _C_ops.prod(
             input,
             dim if dim != None and dim != [] else [0],
             keep_dim,

@@ -15548,7 +15548,7 @@ def where(condition):
     """
     if in_dygraph_mode():
-        return _C_ops.where_index(condition)
+        return _C_ops.nonzero(condition)

     if _in_legacy_dygraph():
         return _legacy_C_ops.where_index(condition)

@@ -16567,7 +16567,7 @@ def uniform_random(
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
             shape,
             dtype,
             float(min),
python/paddle/nn/functional/loss.py
@@ -1017,7 +1017,7 @@ def hsigmoid_loss(
            #         [1.92374969]]
     """
     if in_dygraph_mode():
-        out, _, _ = _C_ops.hierarchical_sigmoid(
+        out, _, _ = _C_ops.hsigmoid_loss(
            input,
            weight,
            label,
python/paddle/tensor/creation.py
@@ -1097,7 +1097,7 @@ def tril(x, diagonal=0, name=None):
            #         [9 , 10, 0 , 0 ]])
     """
     if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, True)
+        return _C_ops.tril(x, diagonal, True)

     if _in_legacy_dygraph():
         op = getattr(_legacy_C_ops, 'tril_triu')

@@ -1163,7 +1163,7 @@ def triu(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, False)
+        return _C_ops.tril(x, diagonal, False)

     if _in_legacy_dygraph():
         op = getattr(_legacy_C_ops, 'tril_triu')
python/paddle/tensor/math.py
@@ -3706,7 +3706,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
         dim = [0]

     if in_dygraph_mode():
-        return _C_ops.reduce_prod(x, dim, keepdim, reduce_all)
+        return _C_ops.prod(x, dim, keepdim, reduce_all)
     if _in_legacy_dygraph():
         return _legacy_C_ops.reduce_prod(
             x, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', reduce_all
python/paddle/tensor/random.py
@@ -584,7 +584,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
             shape,
             dtype,
             float(min),

@@ -664,7 +664,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
            # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
     """
     if in_dygraph_mode():
-        return _C_ops.uniform_random_inplace_(x, min, max, seed, 0, 0, 1.0)
+        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
     else:
         return _legacy_C_ops.uniform_random_inplace_(
             x, 'min', min, 'max', max, 'seed', seed
python/paddle/tensor/search.py
@@ -429,7 +429,7 @@ def nonzero(x, as_tuple=False):
     rank = len(shape)

     if in_dygraph_mode():
-        outs = _C_ops.where_index(x)
+        outs = _C_ops.nonzero(x)
     elif paddle.in_dynamic_mode():
         outs = _legacy_C_ops.where_index(x)
     else:
python/paddle/vision/ops.py
@@ -197,7 +197,7 @@ def yolo_loss(
     """
     if in_dygraph_mode():
-        loss, _, _ = _C_ops.yolov3_loss(
+        loss, _, _ = _C_ops.yolo_loss(
             x,
             gt_box,
             gt_label,