Unverified commit 399047d7, authored by YuanRisheng, committed by GitHub

[PHI]Standardise some C++ API (Part2) (#47510)

* standard_api

* add hardtanh
Parent 957fbb02
@@ -63,8 +63,8 @@ USE_OP_ITSELF(memcpy_d2h);
USE_OP_ITSELF(fetch_v2);
PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random_raw, GPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(uniform_random, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform_raw, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(uniform, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(transpose, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(reshape, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(split, GPU, ALL_LAYOUT);
......
@@ -218,10 +218,10 @@ class Yolov3LossGradMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss,
                            Yolov3LossInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossInferMeta));
+                            PD_INFER_META(phi::YoloLossInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss_grad,
                            Yolov3LossGradInferShapeFunctor,
-                            PD_INFER_META(phi::Yolov3LossGradInferMeta));
+                            PD_INFER_META(phi::YoloLossGradInferMeta));
REGISTER_OPERATOR(yolov3_loss,
                  ops::Yolov3LossOp,
                  ops::Yolov3LossOpMaker,
......
@@ -259,7 +259,7 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(hierarchical_sigmoid,
                            HierarchicalSigmoidInferShapeFunctor,
-                            PD_INFER_META(phi::HierarchicalSigmoidInferMeta));
+                            PD_INFER_META(phi::HSigmoidLossInferMeta));
REGISTER_OPERATOR(hierarchical_sigmoid,
                  ops::HierarchicalSigmoidOp,
                  ops::HierarchicalSigmoidOpMaker<int>,
......
@@ -93,7 +93,7 @@ namespace ops = paddle::operators;
namespace plat = paddle::platform;
DECLARE_INFER_SHAPE_FUNCTOR(tril_triu,
                            TrilTriuInferShapeFunctor,
-                            PD_INFER_META(phi::TrilTriuInferMeta));
+                            PD_INFER_META(phi::TrilInferMeta));
REGISTER_OPERATOR(tril_triu,
                  ops::TrilTriuOp,
                  ops::TrilTriuOpMaker,
......
@@ -48,7 +48,7 @@ class WhereIndexOpMaker : public framework::OpProtoAndCheckerMaker {
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(where_index,
                            WhereIndexInferShapeFunctor,
-                            PD_INFER_META(phi::WhereIndexInferMeta));
+                            PD_INFER_META(phi::NonZeroInferMeta));
REGISTER_OPERATOR(
    where_index,
    ops::WhereIndexOp,
......
@@ -791,8 +791,8 @@
    func : hard_tanh_grad
  inplace : (out_grad -> x_grad)

-- backward_op : hierarchical_sigmoid_grad
-  forward : hierarchical_sigmoid (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
+- backward_op : hsigmoid_loss_grad
+  forward : hsigmoid_loss (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
  output : Tensor(x_grad), Tensor(w_grad), Tensor(bias_grad)
  infer_meta :
@@ -800,7 +800,7 @@
    param : [x ,w, bias]
  optional: path, code, bias
  kernel :
-    func : hierarchical_sigmoid_grad
+    func : hsigmoid_loss_grad

- backward_op : huber_loss_grad
  forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
@@ -1477,6 +1477,16 @@
  kernel :
    func : prelu_grad
- backward_op : prod_grad
forward : prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : prod_grad
- backward_op : psroi_pool_grad
  forward : psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
@@ -1516,16 +1526,6 @@
  output : Tensor(x_grad)
  invoke : real_grad_impl(out_grad, x_grad)
- backward_op : reduce_prod_grad
forward : reduce_prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : prod_grad
- backward_op : relu6_grad
  forward : relu6 (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, float threshold)
@@ -2234,15 +2234,15 @@
  kernel :
    func : triangular_solve_grad

-- backward_op : tril_triu_grad
-  forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
+- backward_op : tril_grad
+  forward : tril(Tensor x, int diagonal, bool lower) -> Tensor(out)
  args : (Tensor out_grad, int diagonal, bool lower)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
-    func : tril_triu_grad
+    func : tril_grad

- backward_op : trilinear_interp_grad
  forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
@@ -2273,14 +2273,14 @@
    func : unfold_grad
  no_need_buffer : x

-- backward_op : uniform_random_inplace_grad
-  forward : uniform_random_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
+- backward_op : uniform_inplace_grad
+  forward : uniform_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
  args : (Tensor out_grad, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
  output : Tensor(x_grad)
  infer_meta :
    func : UniformRandomInplaceGradInferMeta
  kernel :
-    func : uniform_random_inplace_grad
+    func : uniform_inplace_grad
  inplace : (out_grad -> x_grad)

- backward_op : unsqueeze_double_grad
@@ -2335,14 +2335,14 @@
    func : where_grad
  no_need_buffer : x, y

-- backward_op : yolov3_loss_grad
-  forward : yolov3_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
+- backward_op : yolo_loss_grad
+  forward : yolo_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
  args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, Tensor objectness_mask, Tensor gt_match_mask, Tensor loss_grad, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
  output : Tensor(x_grad), Tensor(gt_box_grad), Tensor(gt_label_grad), Tensor(gt_score_grad)
  infer_meta :
-    func : Yolov3LossGradInferMeta
+    func : YoloLossGradInferMeta
  kernel :
-    func : yolov3_loss_grad
+    func : yolo_loss_grad
  optional : gt_score

- backward_op: fold_grad
......
@@ -1036,17 +1036,6 @@
    func : hard_tanh
  backward : hardtanh_grad
- op : hierarchical_sigmoid
args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
output : Tensor(out), Tensor(pre_out), Tensor(w_out)
infer_meta :
func : HierarchicalSigmoidInferMeta
optional: path, code, bias
kernel :
func : hierarchical_sigmoid
data_type : x
backward : hierarchical_sigmoid_grad
- op : histogram
  args : (Tensor input, int64_t bins, int min, int max)
  output : Tensor(out)
@@ -1055,6 +1044,17 @@
  kernel :
    func : histogram
- op : hsigmoid_loss
args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
output : Tensor(out), Tensor(pre_out), Tensor(w_out)
infer_meta :
func : HSigmoidLossInferMeta
optional: path, code, bias
kernel :
func : hsigmoid_loss
data_type : x
backward : hsigmoid_loss_grad
- op : huber_loss
  args : (Tensor input, Tensor label, float delta)
  output : Tensor(out), Tensor(residual)
@@ -1696,6 +1696,14 @@
    func : nms
  data_type : x
- op : nonzero
args : (Tensor condition)
output : Tensor(out)
infer_meta :
func : NonZeroInferMeta
kernel :
func : nonzero
- op : norm
  args : (Tensor x, int axis, float epsilon, bool is_test)
  output : Tensor(out), Tensor(norm)
@@ -1828,6 +1836,15 @@
  kernel :
    func : prior_box
- op : prod
args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
output : Tensor
infer_meta :
func : ReduceIntArrayAxisInferMetaBase
kernel :
func : prod_raw
backward : prod_grad
- op : psroi_pool
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
  output : Tensor
@@ -1893,15 +1910,6 @@
    func : real
  backward : real_grad
- op : reduce_prod
args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
output : Tensor
infer_meta :
func : ReduceIntArrayAxisInferMetaBase
kernel :
func : prod_raw
backward : reduce_prod_grad
- op : relu
  args : (Tensor x)
  output : Tensor(out)
@@ -2460,6 +2468,15 @@
    func : triangular_solve
  backward : triangular_solve_grad
- op : tril
args : (Tensor x, int diagonal, bool lower)
output : Tensor(out)
infer_meta :
func : TrilInferMeta
kernel :
func : tril
backward : tril_grad
- op : tril_indices
  args : (int rows, int cols, int offset, DataType dtype, Place place={})
  output : Tensor(out)
@@ -2472,15 +2489,6 @@
  data_type : dtype
  backend : place
- op : tril_triu
args : (Tensor x, int diagonal, bool lower)
output : Tensor(out)
infer_meta :
func : TrilTriuInferMeta
kernel :
func : tril_triu
backward : tril_triu_grad
- op : trilinear_interp
  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
  output : Tensor(output)
@@ -2535,14 +2543,14 @@
    func : unfold
  backward : unfold_grad

-- op : uniform_random
+- op : uniform
  args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={})
  output : Tensor(out)
  infer_meta :
    func : UniformRandomInferMeta
    param: [shape, dtype]
  kernel :
-    func : uniform_random
+    func : uniform
    param: [shape, dtype, min, max, seed]
    data_type : dtype
    backend : place
@@ -2628,14 +2636,6 @@
    func : where
  backward : where_grad
- op : where_index
args : (Tensor condition)
output : Tensor(out)
infer_meta :
func : WhereIndexInferMeta
kernel :
func : where_index
- op : yolo_box
  args : (Tensor x, Tensor img_size, int[] anchors, int class_num, float conf_thresh, int downsample_ratio, bool clip_bbox, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5)
  output : Tensor(boxes), Tensor(scores)
@@ -2645,16 +2645,16 @@
    func : yolo_box
  data_type : x

-- op : yolov3_loss
+- op : yolo_loss
  args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
  output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
  infer_meta :
-    func : Yolov3LossInferMeta
+    func : YoloLossInferMeta
  kernel :
-    func : yolov3_loss
+    func : yolo_loss
    data_type : x
  optional : gt_score
-  backward : yolov3_loss_grad
+  backward : yolo_loss_grad

- op : zeros
  args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
@@ -2734,16 +2734,16 @@
  intermediate : reserve
  view : (dropout_state_in -> dropout_state_out)

-- op: uniform_random_inplace
+- op: uniform_inplace
  args: (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
  output: Tensor(out)
  infer_meta:
    func: UniformRandomInplaceInferMeta
  kernel:
-    func: uniform_random_inplace
+    func: uniform_inplace
    data_type: x
  inplace: (x -> out)
-  backward: uniform_random_inplace_grad
+  backward: uniform_inplace_grad

- op: unpool
  args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
......
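Every ops.yaml / backward.yaml entry touched above follows the same layout. For reference, the sketch below repeats the new tril entry from this diff with informal annotations; the trailing comments are explanatory only and are not part of the file:

- op : tril                                   # public, standardized operator name
  args : (Tensor x, int diagonal, bool lower) # C++ API signature of the forward op
  output : Tensor(out)                        # output tensor(s)
  infer_meta :
    func : TrilInferMeta                      # PHI InferMeta function used for shape/dtype inference
  kernel :
    func : tril                               # PHI kernel the operator dispatches to
  backward : tril_grad                        # paired backward_op entry (only for ops with a gradient)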
@@ -987,7 +987,7 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
  x_grad->set_dtype(out_grad[0]->dtype());
}

-void Yolov3LossGradInferMeta(const MetaTensor& x,
+void YoloLossGradInferMeta(const MetaTensor& x,
                             const MetaTensor& gt_box,
                             const MetaTensor& gt_label,
                             const MetaTensor& gt_score,
......
@@ -385,7 +385,7 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
                          int axis,
                          MetaTensor* x_grad);

-void Yolov3LossGradInferMeta(const MetaTensor& x,
+void YoloLossGradInferMeta(const MetaTensor& x,
                             const MetaTensor& gt_box,
                             const MetaTensor& gt_label,
                             const MetaTensor& gt_score,
......
@@ -1328,7 +1328,7 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
  out_count->set_dtype(DataType::INT32);
}

-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
+void HSigmoidLossInferMeta(const MetaTensor& x,
                                  const MetaTensor& w,
                                  const MetaTensor& label,
                                  const MetaTensor& path,
@@ -2762,7 +2762,7 @@ void WhereInferMeta(const MetaTensor& condition,
  out->share_meta(x);
}

-void Yolov3LossInferMeta(const MetaTensor& x,
+void YoloLossInferMeta(const MetaTensor& x,
                         const MetaTensor& gt_box,
                         const MetaTensor& gt_label,
                         const MetaTensor& gt_score,
......
@@ -288,7 +288,7 @@ void GraphSampleNeighborsInferMeta(const MetaTensor& row,
                                   MetaTensor* out_count,
                                   MetaTensor* out_eids);

-void HierarchicalSigmoidInferMeta(const MetaTensor& x,
+void HSigmoidLossInferMeta(const MetaTensor& x,
                                  const MetaTensor& w,
                                  const MetaTensor& label,
                                  const MetaTensor& path,
@@ -508,7 +508,7 @@ void WhereInferMeta(const MetaTensor& condition,
                    const MetaTensor& y,
                    MetaTensor* out);

-void Yolov3LossInferMeta(const MetaTensor& x,
+void YoloLossInferMeta(const MetaTensor& x,
                         const MetaTensor& gt_box,
                         const MetaTensor& gt_label,
                         const MetaTensor& gt_score,
......
@@ -402,64 +402,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
  }
}
void SendURecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count) {
auto src_index_dims = src_index.dims();
if (src_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(src_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d",
src_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
src_index_dims.size(),
1,
phi::errors::InvalidArgument(
"The Src_index should be 1D, when it is not 2D, but we get %d",
src_index_dims.size()));
}
auto dst_index_dims = dst_index.dims();
if (dst_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(dst_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d",
dst_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
dst_index_dims.size(),
1,
phi::errors::InvalidArgument("The Dst_index should be 1D, "
"when it is not 2D, but we get %d",
dst_index_dims.size()));
}
PADDLE_ENFORCE_EQ(src_index_dims[0],
dst_index_dims[0],
phi::errors::InvalidArgument(
"Src_index and Dst_index should have the same shape."));
auto dims = x.dims();
std::vector<int64_t> dims_ = phi::vectorize(dims);
dims_[0] = -1;
out->set_dims(phi::make_ddim(dims_));
out->set_dtype(x.dtype());
if (reduce_op == "MEAN") {
dst_count->set_dims({-1});
dst_count->set_dtype(DataType::INT32);
}
}
void GroupNormInferMeta(const MetaTensor& x,
                        const MetaTensor& scale,
                        const MetaTensor& bias,
@@ -1164,6 +1106,64 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
  out->set_dtype(x.dtype());
}
void SendURecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count) {
auto src_index_dims = src_index.dims();
if (src_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(src_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d",
src_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
src_index_dims.size(),
1,
phi::errors::InvalidArgument(
"The Src_index should be 1D, when it is not 2D, but we get %d",
src_index_dims.size()));
}
auto dst_index_dims = dst_index.dims();
if (dst_index_dims.size() == 2) {
PADDLE_ENFORCE_EQ(dst_index_dims[1],
1,
phi::errors::InvalidArgument(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d",
dst_index_dims[1]));
} else {
PADDLE_ENFORCE_EQ(
dst_index_dims.size(),
1,
phi::errors::InvalidArgument("The Dst_index should be 1D, "
"when it is not 2D, but we get %d",
dst_index_dims.size()));
}
PADDLE_ENFORCE_EQ(src_index_dims[0],
dst_index_dims[0],
phi::errors::InvalidArgument(
"Src_index and Dst_index should have the same shape."));
auto dims = x.dims();
std::vector<int64_t> dims_ = phi::vectorize(dims);
dims_[0] = -1;
out->set_dims(phi::make_ddim(dims_));
out->set_dtype(x.dtype());
if (reduce_op == "MEAN") {
dst_count->set_dims({-1});
dst_count->set_dtype(DataType::INT32);
}
}
void SpectralNormInferMeta(const MetaTensor& weight,
                           const MetaTensor& u,
                           const MetaTensor& v,
......
@@ -72,14 +72,6 @@ void InstanceNormInferMeta(const MetaTensor& x,
                           MetaTensor* saved_variance,
                           MetaConfig config = MetaConfig());
void SendURecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count);
void GroupNormInferMeta(const MetaTensor& x,
                        const MetaTensor& scale,
                        const MetaTensor& bias,
@@ -186,6 +178,14 @@ void ScatterNdAddInferMeta(const MetaTensor& x,
                           const MetaTensor& updates,
                           MetaTensor* out);
void SendURecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count);
void SpectralNormInferMeta(const MetaTensor& weight,
                           const MetaTensor& u,
                           const MetaTensor& v,
......
@@ -1204,6 +1204,211 @@ void FlipInferMeta(const MetaTensor& x,
  out->share_lod(x);
}
void FoldInferMeta(const MetaTensor& x,
const std::vector<int>& output_sizes,
const std::vector<int>& kernel_sizes,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
MetaTensor* out) {
auto in_dims = x.dims();
PADDLE_ENFORCE_EQ(
output_sizes.size(),
2,
phi::errors::InvalidArgument(
"It is expected output_size equals to 2, but got size %d",
output_sizes.size()));
PADDLE_ENFORCE_EQ(
kernel_sizes.size(),
2,
phi::errors::InvalidArgument(
"It is expected kernel_size equals to 2, but got size %d",
kernel_sizes.size()));
PADDLE_ENFORCE_EQ(
strides.size(),
2,
phi::errors::InvalidArgument(
"It is expected strides_size equals to 2, but got size %d",
strides.size()));
PADDLE_ENFORCE_EQ(
paddings.size(),
4,
phi::errors::InvalidArgument(
"It is expected paddings_size equals to 4, but got size %d",
paddings.size()));
PADDLE_ENFORCE_EQ(
dilations.size(),
2,
phi::errors::InvalidArgument(
"It is expected dilations_size equals to 2, but got size %d",
dilations.size()));
int output_height = output_sizes[0];
int output_width = output_sizes[1];
int kernel_height = kernel_sizes[0];
int kernel_width = kernel_sizes[1];
int dilation_height = dilations[0];
int dilation_width = dilations[1];
int stride_height = strides[0];
int stride_width = strides[1];
// check kernel_sizes
PADDLE_ENFORCE_GT(kernel_height,
0,
phi::errors::InvalidArgument(
"The `kernel_sizes` should be greater than zero, "
"but received kernel_height: %d kernel_width: %d.",
kernel_sizes[0],
kernel_sizes[1]));
PADDLE_ENFORCE_GT(kernel_width,
0,
phi::errors::InvalidArgument(
"The `kernel_sizes` should be greater than zero, "
"but received kernel_height: %d kernel_width: %d.",
kernel_sizes[0],
kernel_sizes[1]));
// check strides
PADDLE_ENFORCE_GT(stride_height,
0,
phi::errors::InvalidArgument(
"The `strides` should be greater than zero, "
"but received strides_height: %d strides_width: %d.",
strides[0],
strides[1]));
PADDLE_ENFORCE_GT(stride_width,
0,
phi::errors::InvalidArgument(
"The `strides` should be greater than zero, "
"but received strides_height: %d strides_width: %d.",
strides[0],
strides[1]));
// check dilations
PADDLE_ENFORCE_GT(output_height,
1,
phi::errors::InvalidArgument(
"The `output_height` should be greater than one, "
"but received output_height: %d .",
output_height));
PADDLE_ENFORCE_GT(output_width,
1,
phi::errors::InvalidArgument(
"The `output_width` should be greater than one, "
"but received output_width: %d .",
output_width));
// check output size
PADDLE_ENFORCE_GT(
dilation_height,
0,
phi::errors::InvalidArgument(
"The `dilations` should be greater than zero, "
"but received dilations_height: %d dilations_width: %d.",
dilations[0],
dilations[1]));
PADDLE_ENFORCE_GT(
dilation_width,
0,
phi::errors::InvalidArgument(
"The `dilations` should be greater than zero, "
"but received dilations_height: %d dilations_width: %d.",
dilations[0],
dilations[1]));
std::vector<int> out_dims;
// batch_size
out_dims.push_back(in_dims[0]);
// output_plane
int output_channels = in_dims[1] / (kernel_width * kernel_height);
out_dims.push_back(output_channels);
int blocks_height = (output_sizes[0] + 2 * paddings[0] -
(dilations[0] * (kernel_sizes[0] - 1) + 1)) /
strides[0] +
1;
int blocks_width = (output_sizes[1] + 2 * paddings[1] -
(dilations[1] * (kernel_sizes[1] - 1) + 1)) /
strides[1] +
1;
// check output height and width
PADDLE_ENFORCE_GT(
blocks_height,
0,
phi::errors::InvalidArgument(
"The sliding blocks calculated from input spatial size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"is (%d, %d), which should be a positive integer.",
in_dims[2],
in_dims[3],
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
output_height,
output_width));
PADDLE_ENFORCE_GT(
blocks_width,
0,
phi::errors::InvalidArgument(
"The sliding blocks calculated from input spatial size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"is (%d, %d), which should be a positive integer.",
in_dims[2],
in_dims[3],
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
output_height,
output_width));
PADDLE_ENFORCE_EQ(
blocks_height * blocks_width,
in_dims[2],
phi::errors::InvalidArgument(
"Given input output_size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"which should be expected size of input's dimension "
"2 to match the calculated number of %d * %d = %d, but got %d",
output_height,
output_width,
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
blocks_height,
blocks_width,
blocks_height * blocks_width,
in_dims[2]));
PADDLE_ENFORCE_EQ(
in_dims[1] % (kernel_sizes[0] * kernel_sizes[1]),
0,
phi::errors::InvalidArgument(
"Expected size of input's dimension 1 to be divisible by the"
"product of kernel_size, but got input.size(1)=%d and "
"kernel_size=( %d"
", %d).",
in_dims[1],
kernel_sizes[0],
kernel_sizes[1]));
out_dims.push_back(output_height);
out_dims.push_back(output_width);
if (out != nullptr) {
out->set_dims(phi::make_ddim(out_dims));
out->set_dtype(x.dtype());
}
}
void FrameInferMeta(const MetaTensor& x,
                    int frame_length,
                    int hop_length,
@@ -1327,6 +1532,18 @@ void HistogramInferMeta(
  out->share_lod(input);
}
void IdentityLossInferMeta(const MetaTensor& x,
int reduction,
MetaTensor* out) {
if (reduction == 2) {
out->set_dtype(x.dtype());
out->set_dims(x.dims());
} else {
out->set_dims(phi::make_ddim({1}));
out->set_dtype(x.dtype());
}
}
void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
  PADDLE_ENFORCE_EQ(
      product(x.dims()),
@@ -2036,6 +2253,17 @@ void NMSInferMeta(const MetaTensor& x, float threshold, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
}
void NonZeroInferMeta(const MetaTensor& condition, MetaTensor* out) {
auto rank = condition.dims().size();
PADDLE_ENFORCE_GE(
rank,
1UL,
phi::errors::InvalidArgument(
"Input(Condition) should have number of dimension at least 1"));
out->set_dims(phi::make_ddim({-1, rank}));
out->set_dtype(DataType::INT64);
}
void NormInferMeta(const MetaTensor& x,
                   int axis,
                   float epsilon,
@@ -2054,16 +2282,53 @@ void NormInferMeta(const MetaTensor& x,
  }
}

-void OverlapAddInferMeta(const MetaTensor& x,
-                         int hop_length,
-                         int axis,
-                         MetaTensor* out,
-                         MetaConfig config) {
-  const auto x_dims = x.dims();
-  const int x_rank = x_dims.size();
-  PADDLE_ENFORCE_GE(
-      x_rank,
+void OneHotRawInferMeta(const MetaTensor& x,
+                        const Scalar& depth,
+                        DataType dtype,
+                        bool allow_out_of_range,
+                        MetaTensor* out) {
+  auto x_dims = x.dims();
+  PADDLE_ENFORCE_GE(
+      x_dims.size(),
+      1,
+      phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
+  auto out_dims_vec = phi::vectorize(x_dims);
+  out_dims_vec.push_back(depth.to<int>());
+  auto out_dims = phi::make_ddim(out_dims_vec);
+  out->set_dims(out_dims);
+  out->share_lod(x);
+  out->set_dtype(dtype);
+}
+
+void OneHotInferMeta(const MetaTensor& x,
+                     const Scalar& depth_t,
+                     MetaTensor* out) {
+  auto x_dims = x.dims();
+  PADDLE_ENFORCE_GE(
+      x_dims.size(),
+      1,
+      phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
+  int depth = depth_t.to<int>();
+  auto out_dims_vec = phi::vectorize(x_dims);
+  out_dims_vec.push_back(depth);
+  auto out_dims = phi::make_ddim(out_dims_vec);
+  out->set_dims(out_dims);
+  out->share_lod(x);
+  out->set_dtype(phi::DataType::FLOAT32);
+}
+
+void OverlapAddInferMeta(const MetaTensor& x,
+                         int hop_length,
+                         int axis,
+                         MetaTensor* out,
+                         MetaConfig config) {
+  const auto x_dims = x.dims();
+  const int x_rank = x_dims.size();
+  PADDLE_ENFORCE_GE(
+      x_rank,
      2,
      errors::InvalidArgument(
          "Input(X) of OverlapAddOp should be a tensor which contains "
@@ -3956,7 +4221,7 @@ void UnbindInferMeta(const MetaTensor& x,
  }
}

-void TrilTriuInferMeta(const MetaTensor& x,
+void TrilInferMeta(const MetaTensor& x,
                   int diagonal,
                   bool lower,
                   MetaTensor* out) {
@@ -4442,54 +4707,6 @@ void UnStackInferMeta(const MetaTensor& x,
  }
}
void OneHotRawInferMeta(const MetaTensor& x,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out) {
auto x_dims = x.dims();
PADDLE_ENFORCE_GE(
x_dims.size(),
1,
phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
auto out_dims_vec = phi::vectorize(x_dims);
out_dims_vec.push_back(depth.to<int>());
auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims);
out->share_lod(x);
out->set_dtype(dtype);
}
void OneHotInferMeta(const MetaTensor& x,
const Scalar& depth_t,
MetaTensor* out) {
auto x_dims = x.dims();
PADDLE_ENFORCE_GE(
x_dims.size(),
1,
phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
int depth = depth_t.to<int>();
auto out_dims_vec = phi::vectorize(x_dims);
out_dims_vec.push_back(depth);
auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims);
out->share_lod(x);
out->set_dtype(phi::DataType::FLOAT32);
}
void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
auto rank = condition.dims().size();
PADDLE_ENFORCE_GE(
rank,
1UL,
phi::errors::InvalidArgument(
"Input(Condition) should have number of dimension at least 1"));
out->set_dims(phi::make_ddim({-1, rank}));
out->set_dtype(DataType::INT64);
}
void ChannelShuffleInferMeta(const MetaTensor& x,
                             int groups,
                             const std::string& data_format,
@@ -4536,223 +4753,6 @@ void ChannelShuffleInferMeta(const MetaTensor& x,
  out->set_dims(output_dims);
}
void IdentityLossInferMeta(const MetaTensor& x,
int reduction,
MetaTensor* out) {
if (reduction == 2) {
out->set_dtype(x.dtype());
out->set_dims(x.dims());
} else {
out->set_dims(phi::make_ddim({1}));
out->set_dtype(x.dtype());
}
}
void FoldInferMeta(const MetaTensor& x,
const std::vector<int>& output_sizes,
const std::vector<int>& kernel_sizes,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
MetaTensor* out) {
auto in_dims = x.dims();
PADDLE_ENFORCE_EQ(
output_sizes.size(),
2,
phi::errors::InvalidArgument(
"It is expected output_size equals to 2, but got size %d",
output_sizes.size()));
PADDLE_ENFORCE_EQ(
kernel_sizes.size(),
2,
phi::errors::InvalidArgument(
"It is expected kernel_size equals to 2, but got size %d",
kernel_sizes.size()));
PADDLE_ENFORCE_EQ(
strides.size(),
2,
phi::errors::InvalidArgument(
"It is expected strides_size equals to 2, but got size %d",
strides.size()));
PADDLE_ENFORCE_EQ(
paddings.size(),
4,
phi::errors::InvalidArgument(
"It is expected paddings_size equals to 4, but got size %d",
paddings.size()));
PADDLE_ENFORCE_EQ(
dilations.size(),
2,
phi::errors::InvalidArgument(
"It is expected dilations_size equals to 2, but got size %d",
dilations.size()));
int output_height = output_sizes[0];
int output_width = output_sizes[1];
int kernel_height = kernel_sizes[0];
int kernel_width = kernel_sizes[1];
int dilation_height = dilations[0];
int dilation_width = dilations[1];
int stride_height = strides[0];
int stride_width = strides[1];
// check kernel_sizes
PADDLE_ENFORCE_GT(kernel_height,
0,
phi::errors::InvalidArgument(
"The `kernel_sizes` should be greater than zero, "
"but received kernel_height: %d kernel_width: %d.",
kernel_sizes[0],
kernel_sizes[1]));
PADDLE_ENFORCE_GT(kernel_width,
0,
phi::errors::InvalidArgument(
"The `kernel_sizes` should be greater than zero, "
"but received kernel_height: %d kernel_width: %d.",
kernel_sizes[0],
kernel_sizes[1]));
// check strides
PADDLE_ENFORCE_GT(stride_height,
0,
phi::errors::InvalidArgument(
"The `strides` should be greater than zero, "
"but received strides_height: %d strides_width: %d.",
strides[0],
strides[1]));
PADDLE_ENFORCE_GT(stride_width,
0,
phi::errors::InvalidArgument(
"The `strides` should be greater than zero, "
"but received strides_height: %d strides_width: %d.",
strides[0],
strides[1]));
// check dilations
PADDLE_ENFORCE_GT(output_height,
1,
phi::errors::InvalidArgument(
"The `output_height` should be greater than one, "
"but received output_height: %d .",
output_height));
PADDLE_ENFORCE_GT(output_width,
1,
phi::errors::InvalidArgument(
"The `output_width` should be greater than one, "
"but received output_width: %d .",
output_width));
// check output size
PADDLE_ENFORCE_GT(
dilation_height,
0,
phi::errors::InvalidArgument(
"The `dilations` should be greater than zero, "
"but received dilations_height: %d dilations_width: %d.",
dilations[0],
dilations[1]));
PADDLE_ENFORCE_GT(
dilation_width,
0,
phi::errors::InvalidArgument(
"The `dilations` should be greater than zero, "
"but received dilations_height: %d dilations_width: %d.",
dilations[0],
dilations[1]));
std::vector<int> out_dims;
// batch_size
out_dims.push_back(in_dims[0]);
// output_plane
int output_channels = in_dims[1] / (kernel_width * kernel_height);
out_dims.push_back(output_channels);
int blocks_height = (output_sizes[0] + 2 * paddings[0] -
(dilations[0] * (kernel_sizes[0] - 1) + 1)) /
strides[0] +
1;
int blocks_width = (output_sizes[1] + 2 * paddings[1] -
(dilations[1] * (kernel_sizes[1] - 1) + 1)) /
strides[1] +
1;
// check output height and width
PADDLE_ENFORCE_GT(
blocks_height,
0,
phi::errors::InvalidArgument(
"The sliding blocks calculated from input spatial size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"is (%d, %d), which should be a positive integer.",
in_dims[2],
in_dims[3],
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
output_height,
output_width));
PADDLE_ENFORCE_GT(
blocks_width,
0,
phi::errors::InvalidArgument(
"The sliding blocks calculated from input spatial size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"is (%d, %d), which should be a positive integer.",
in_dims[2],
in_dims[3],
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
output_height,
output_width));
PADDLE_ENFORCE_EQ(
blocks_height * blocks_width,
in_dims[2],
phi::errors::InvalidArgument(
"Given input output_size (%d, %d), "
"kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
"which should be expected size of input's dimension "
"2 to match the calculated number of %d * %d = %d, but got %d",
output_height,
output_width,
kernel_sizes[0],
kernel_sizes[1],
strides[0],
strides[1],
dilations[0],
dilations[1],
blocks_height,
blocks_width,
blocks_height * blocks_width,
in_dims[2]));
PADDLE_ENFORCE_EQ(
in_dims[1] % (kernel_sizes[0] * kernel_sizes[1]),
0,
phi::errors::InvalidArgument(
"Expected size of input's dimension 1 to be divisible by the"
"product of kernel_size, but got input.size(1)=%d and "
"kernel_size=( %d"
", %d).",
in_dims[1],
kernel_sizes[0],
kernel_sizes[1]));
out_dims.push_back(output_height);
out_dims.push_back(output_width);
if (out != nullptr) {
out->set_dims(phi::make_ddim(out_dims));
out->set_dtype(x.dtype());
}
}
} // namespace phi

PD_REGISTER_INFER_META_FN(flatten, phi::FlattenInferMeta);
@@ -65,6 +65,11 @@ void BatchSizeLikeInferMeta(const MetaTensor& x,
void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);
void ChannelShuffleInferMeta(const MetaTensor& x,
int groups,
const std::string& data_format,
MetaTensor* out);
void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);

void ClassCenterSampleInferMeta(const MetaTensor& label,
@@ -191,6 +196,14 @@ void FlipInferMeta(const MetaTensor& x,
                   const std::vector<int>& axis,
                   MetaTensor* out);
void FoldInferMeta(const MetaTensor& x,
const std::vector<int>& output_sizes,
const std::vector<int>& kernel_sizes,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
MetaTensor* out);
void FrameInferMeta(const MetaTensor& x,
                    int frame_length,
                    int hop_length,
@@ -214,6 +227,8 @@ void GumbelSoftmaxInferMeta(const MetaTensor& x,
void HistogramInferMeta(
    const MetaTensor& input, int64_t bins, int min, int max, MetaTensor* out);
void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);

void InferMetaFromVecValue(const MetaTensor& x,
@@ -288,6 +303,8 @@ void NanmedianInferMeta(const MetaTensor& x,
                        MetaTensor* out,
                        MetaTensor* median_index);
void NonZeroInferMeta(const MetaTensor& condition, MetaTensor* out);
void NMSInferMeta(const MetaTensor& x, float threshold, MetaTensor* out);

void NormInferMeta(const MetaTensor& x,
@@ -297,6 +314,14 @@ void NormInferMeta(const MetaTensor& x,
                   MetaTensor* out,
                   MetaTensor* norm);
void OneHotRawInferMeta(const MetaTensor& x,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out);
void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
void OverlapAddInferMeta(const MetaTensor& x,
                         int hop_length,
                         int axis,
@@ -576,7 +601,7 @@ void TransposeGradInferMeta(const MetaTensor& x,
                            const std::vector<int>& axis,
                            MetaTensor* out);

-void TrilTriuInferMeta(const MetaTensor& x,
+void TrilInferMeta(const MetaTensor& x,
                   int diagonal,
                   bool lower,
                   MetaTensor* out);
@@ -657,29 +682,4 @@ void UnStackInferMeta(const MetaTensor& x,
                      int num,
                      std::vector<MetaTensor*> outs);
void OneHotRawInferMeta(const MetaTensor& x,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out);
void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);
void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);
void ChannelShuffleInferMeta(const MetaTensor& x,
int groups,
const std::string& data_format,
MetaTensor* out);
void IdentityLossInferMeta(const MetaTensor& x, int reduction, MetaTensor* out);
void FoldInferMeta(const MetaTensor& x,
const std::vector<int>& output_sizes,
const std::vector<int>& kernel_sizes,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
MetaTensor* out);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h"
namespace phi {
template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
const std::vector<int64_t>& height_sections,
const std::vector<std::string>& epmap,
const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
DenseTensor* bias_grad) {
HierarchicalSigmoidGradKernelImpl<T>(ctx,
x,
w,
label,
path,
code,
bias,
pre_out,
out_grad,
num_classes,
remote_prefetch,
trainer_id,
height_sections,
epmap,
table_names,
is_sparse,
x_grad,
w_grad,
bias_grad);
}
} // namespace phi
PD_REGISTER_KERNEL(hierarchical_sigmoid_grad,
CPU,
ALL_LAYOUT,
phi::HierarchicalSigmoidGradKernel,
float,
double) {}
@@ -26,8 +26,7 @@ namespace phi {
namespace math = paddle::operators::math;

template <typename T, typename Context>
-void HierarchicalSigmoidGradKernelImpl(
-    const Context& ctx,
+void HSigmoidLossGradKernelImpl(const Context& ctx,
    const DenseTensor& x,
    const DenseTensor& w,
    const DenseTensor& label,
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/hsigmoid_loss_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"
namespace phi {
template <typename T, typename Context>
void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
const std::vector<int64_t>& height_sections,
const std::vector<std::string>& epmap,
const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
DenseTensor* bias_grad) {
HSigmoidLossGradKernelImpl<T>(ctx,
x,
w,
label,
path,
code,
bias,
pre_out,
out_grad,
num_classes,
remote_prefetch,
trainer_id,
height_sections,
epmap,
table_names,
is_sparse,
x_grad,
w_grad,
bias_grad);
}
} // namespace phi
PD_REGISTER_KERNEL(hsigmoid_loss_grad,
CPU,
ALL_LAYOUT,
phi::HSigmoidLossGradKernel,
float,
double) {}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/hierarchical_sigmoid_kernel.h"
+#include "paddle/phi/kernels/hsigmoid_loss_kernel.h"

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include "paddle/fluid/platform/transform.h"
@@ -28,7 +28,7 @@ namespace phi {
namespace math = paddle::operators::math;

template <typename T, typename Context>
-void HierarchicalSigmoidKernel(const Context& ctx,
+void HSigmoidLossKernel(const Context& ctx,
                        const DenseTensor& x,
                        const DenseTensor& w,
                        const DenseTensor& label,
@@ -106,9 +106,5 @@ void HierarchicalSigmoidKernel(const Context& ctx,

} // namespace phi

-PD_REGISTER_KERNEL(hierarchical_sigmoid,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::HierarchicalSigmoidKernel,
-                   float,
-                   double) {}
+PD_REGISTER_KERNEL(
+    hsigmoid_loss, CPU, ALL_LAYOUT, phi::HSigmoidLossKernel, float, double) {}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/where_index_kernel.h"
+#include "paddle/phi/kernels/nonzero_kernel.h"

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
@@ -47,7 +47,7 @@ struct WhereIndexFunctor {
};

template <typename T, typename Context>
-void WhereIndexKernel(const Context& dev_ctx,
+void NonZeroKernel(const Context& dev_ctx,
                      const DenseTensor& condition,
                      DenseTensor* out) {
  const T* cond_data = condition.data<T>();
@@ -83,10 +83,10 @@ void WhereIndexKernel(const Context& dev_ctx,
} // namespace phi

-PD_REGISTER_KERNEL(where_index,
+PD_REGISTER_KERNEL(nonzero,
                   CPU,
                   ALL_LAYOUT,
-                   phi::WhereIndexKernel,
+                   phi::NonZeroKernel,
                   int64_t,
                   int,
                   int16_t,
......
@@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_grad_kernel.h"
+#include "paddle/phi/kernels/prod_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"

PD_REGISTER_KERNEL(prod_grad,
                   CPU,
                   ALL_LAYOUT,
-                   phi::ReduceProdGradKernel,
+                   phi::ProdGradKernel,
                   float,
                   double,
                   int,
......
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include "paddle/phi/kernels/reduce_prod_kernel.h"
+#include "paddle/phi/kernels/prod_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
......
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu,
+PD_REGISTER_KERNEL(tril_grad,
                   CPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuKernel,
+                   phi::TrilGradKernel,
                   bool,
                   float,
                   double,
......
@@ -14,12 +14,12 @@
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h"
+#include "paddle/phi/kernels/impl/tril_kernel_impl.h"

-PD_REGISTER_KERNEL(tril_triu_grad,
+PD_REGISTER_KERNEL(tril,
                   CPU,
                   ALL_LAYOUT,
-                   phi::TrilTriuGradKernel,
+                   phi::TrilKernel,
                   bool,
                   float,
                   double,
......
...@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h" #include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceGradKernel(const Context& ctx, void UniformInplaceGradKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
float min, float min,
float max, float max,
...@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx, ...@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_inplace_grad, PD_REGISTER_KERNEL(uniform_inplace_grad,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomInplaceGradKernel, phi::UniformInplaceGradKernel,
float, float,
double) {} double) {}
...@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/uniform_random_inplace_kernel.h" #include "paddle/phi/kernels/uniform_inplace_kernel.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceKernel(const Context& ctx, void UniformInplaceKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
float min, float min,
float max, float max,
...@@ -46,9 +46,9 @@ void UniformRandomInplaceKernel(const Context& ctx, ...@@ -46,9 +46,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_inplace, PD_REGISTER_KERNEL(uniform_inplace,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomInplaceKernel, phi::UniformInplaceKernel,
float, float,
double) {} double) {}
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/uniform_random_kernel.h" #include "paddle/phi/kernels/uniform_kernel.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/uniform_real_distribution.h" #include "paddle/phi/kernels/funcs/uniform_real_distribution.h"
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context &dev_ctx, void UniformRawKernel(const Context &dev_ctx,
const IntArray &shape, const IntArray &shape,
DataType dtype, DataType dtype,
const Scalar &min, const Scalar &min,
...@@ -63,10 +63,10 @@ void UniformRandomRawKernel(const Context &dev_ctx, ...@@ -63,10 +63,10 @@ void UniformRandomRawKernel(const Context &dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_raw, PD_REGISTER_KERNEL(uniform_raw,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomRawKernel, phi::UniformRawKernel,
float, float,
double, double,
phi::dtype::bfloat16) {} phi::dtype::bfloat16) {}
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/yolov3_loss_grad_kernel.h" #include "paddle/phi/kernels/yolo_loss_grad_kernel.h"
#include <algorithm> #include <algorithm>
#include <vector> #include <vector>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h" #include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
namespace phi { namespace phi {
...@@ -117,7 +117,7 @@ static inline void CalcObjnessLossGrad(T* input_grad, ...@@ -117,7 +117,7 @@ static inline void CalcObjnessLossGrad(T* input_grad,
} }
template <typename T, typename Context> template <typename T, typename Context>
void Yolov3LossGradKernel(const Context& dev_ctx, void YoloLossGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& gt_box, const DenseTensor& gt_box,
const DenseTensor& gt_label, const DenseTensor& gt_label,
...@@ -237,9 +237,5 @@ void Yolov3LossGradKernel(const Context& dev_ctx, ...@@ -237,9 +237,5 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(yolov3_loss_grad, PD_REGISTER_KERNEL(
CPU, yolo_loss_grad, CPU, ALL_LAYOUT, phi::YoloLossGradKernel, float, double) {}
ALL_LAYOUT,
phi::Yolov3LossGradKernel,
float,
double) {}
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/yolov3_loss_kernel.h" #include "paddle/phi/kernels/yolo_loss_kernel.h"
#include <algorithm> #include <algorithm>
#include <vector> #include <vector>
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/yolov3_loss_functor.h" #include "paddle/phi/kernels/cpu/yolo_loss_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
namespace phi { namespace phi {
...@@ -178,7 +178,7 @@ static void inline GtValid(bool* valid, ...@@ -178,7 +178,7 @@ static void inline GtValid(bool* valid,
} }
template <typename T, typename Context> template <typename T, typename Context>
void Yolov3LossKernel(const Context& dev_ctx, void YoloLossKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& gt_box, const DenseTensor& gt_box,
const DenseTensor& gt_label, const DenseTensor& gt_label,
...@@ -371,4 +371,4 @@ void Yolov3LossKernel(const Context& dev_ctx, ...@@ -371,4 +371,4 @@ void Yolov3LossKernel(const Context& dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(
yolov3_loss, CPU, ALL_LAYOUT, phi::Yolov3LossKernel, float, double) {} yolo_loss, CPU, ALL_LAYOUT, phi::YoloLossKernel, float, double) {}
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#include "paddle/phi/kernels/funcs/slice.h" #include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/impl/lstsq_kernel_impl.h" #include "paddle/phi/kernels/impl/lstsq_kernel_impl.h"
#include "paddle/phi/kernels/impl/qr_kernel_impl.h" #include "paddle/phi/kernels/impl/qr_kernel_impl.h"
#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h" #include "paddle/phi/kernels/impl/tril_kernel_impl.h"
#include "paddle/phi/kernels/lstsq_kernel.h" #include "paddle/phi/kernels/lstsq_kernel.h"
#include "paddle/phi/kernels/matmul_kernel.h" #include "paddle/phi/kernels/matmul_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h" #include "paddle/phi/kernels/transpose_kernel.h"
...@@ -110,7 +110,7 @@ void LstsqKernel(const Context& dev_ctx, ...@@ -110,7 +110,7 @@ void LstsqKernel(const Context& dev_ctx,
DenseTensor* res_r = new DenseTensor(); DenseTensor* res_r = new DenseTensor();
res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn})); res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
dev_ctx.template Alloc<T>(res_r); dev_ctx.template Alloc<T>(res_r);
phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r); phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);
DenseTensor trans_y = phi::TransposeLast2Dim<T>(dev_ctx, tmp_y); DenseTensor trans_y = phi::TransposeLast2Dim<T>(dev_ctx, tmp_y);
DenseTensor slice_y = DenseTensor slice_y =
...@@ -135,7 +135,7 @@ void LstsqKernel(const Context& dev_ctx, ...@@ -135,7 +135,7 @@ void LstsqKernel(const Context& dev_ctx,
DenseTensor* res_r = new DenseTensor(); DenseTensor* res_r = new DenseTensor();
res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn})); res_r->Resize(phi::make_ddim({batch_count, min_mn, min_mn}));
dev_ctx.template Alloc<T>(res_r); dev_ctx.template Alloc<T>(res_r);
phi::TrilTriuKernel<T>(dev_ctx, slice_r, 0, false, res_r); phi::TrilKernel<T>(dev_ctx, slice_r, 0, false, res_r);
phi::TriangularSolveKernel<T, Context>( phi::TriangularSolveKernel<T, Context>(
dev_ctx, *res_r, *new_y, true, true, false, solution); dev_ctx, *res_r, *new_y, true, true, false, solution);
......
...@@ -25,7 +25,7 @@ namespace cub = hipcub; ...@@ -25,7 +25,7 @@ namespace cub = hipcub;
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/select_impl.cu.h" #include "paddle/phi/kernels/funcs/select_impl.cu.h"
#include "paddle/phi/kernels/where_index_kernel.h" #include "paddle/phi/kernels/nonzero_kernel.h"
namespace phi { namespace phi {
template <typename MaskT, typename IndexT, typename OutT> template <typename MaskT, typename IndexT, typename OutT>
...@@ -62,7 +62,7 @@ struct IndexFunctor { ...@@ -62,7 +62,7 @@ struct IndexFunctor {
}; };
template <typename T, typename Context> template <typename T, typename Context>
void WhereIndexKernel(const Context &dev_ctx, void NonZeroKernel(const Context &dev_ctx,
const DenseTensor &condition, const DenseTensor &condition,
DenseTensor *out) { DenseTensor *out) {
DenseTensor in_data; DenseTensor in_data;
...@@ -74,10 +74,10 @@ void WhereIndexKernel(const Context &dev_ctx, ...@@ -74,10 +74,10 @@ void WhereIndexKernel(const Context &dev_ctx,
} }
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(where_index, PD_REGISTER_KERNEL(nonzero,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::WhereIndexKernel, phi::NonZeroKernel,
int64_t, int64_t,
int, int,
int16_t, int16_t,
......
...@@ -12,16 +12,16 @@ ...@@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/reduce_prod_grad_kernel.h" #include "paddle/phi/kernels/prod_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h" #include "paddle/phi/kernels/impl/prod_grad_kernel_impl.h"
PD_REGISTER_KERNEL(prod_grad, PD_REGISTER_KERNEL(prod_grad,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::ReduceProdGradKernel, phi::ProdGradKernel,
float, float,
double, double,
int, int,
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include "paddle/phi/kernels/qr_kernel.h" #include "paddle/phi/kernels/qr_kernel.h"
#include "paddle/phi/kernels/slice_kernel.h" #include "paddle/phi/kernels/slice_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h" #include "paddle/phi/kernels/transpose_kernel.h"
#include "paddle/phi/kernels/tril_triu_kernel.h" #include "paddle/phi/kernels/tril_kernel.h"
namespace phi { namespace phi {
...@@ -103,12 +103,12 @@ void QrKernel(const Context& ctx, ...@@ -103,12 +103,12 @@ void QrKernel(const Context& ctx,
auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr); auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
auto sliced_qr = SliceKernel<T, Context>( auto sliced_qr = SliceKernel<T, Context>(
ctx, trans_qr, {trans_qr.dims().size() - 2}, {0}, {min_mn}, {1}, {}); ctx, trans_qr, {trans_qr.dims().size() - 2}, {0}, {min_mn}, {1}, {});
auto tmp_r = TrilTriu<T, Context>(ctx, sliced_qr, 0, false); auto tmp_r = Tril<T, Context>(ctx, sliced_qr, 0, false);
// Transpose 'tmp_r' to restore the original row-major order // Transpose 'tmp_r' to restore the original row-major order
phi::Copy(ctx, tmp_r, r->place(), false, r); phi::Copy(ctx, tmp_r, r->place(), false, r);
} else { } else {
auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr); auto trans_qr = TransposeLast2Dim<T, Context>(ctx, qr);
auto tmp_r = TrilTriu<T, Context>(ctx, trans_qr, 0, false); auto tmp_r = Tril<T, Context>(ctx, trans_qr, 0, false);
// Transpose 'tmp_r' to restore the original row-major order // Transpose 'tmp_r' to restore the original row-major order
phi::Copy(ctx, tmp_r, r->place(), false, r); phi::Copy(ctx, tmp_r, r->place(), false, r);
} }
......
...@@ -14,12 +14,12 @@ ...@@ -14,12 +14,12 @@
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/tril_triu_kernel_impl.h" #include "paddle/phi/kernels/impl/tril_grad_kernel_impl.h"
PD_REGISTER_KERNEL(tril_triu, PD_REGISTER_KERNEL(tril_grad,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::TrilTriuKernel, phi::TrilGradKernel,
bool, bool,
float, float,
double, double,
......
...@@ -14,12 +14,12 @@ ...@@ -14,12 +14,12 @@
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/tril_triu_grad_kernel_impl.h" #include "paddle/phi/kernels/impl/tril_kernel_impl.h"
PD_REGISTER_KERNEL(tril_triu_grad, PD_REGISTER_KERNEL(tril,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::TrilTriuGradKernel, phi::TrilKernel,
bool, bool,
float, float,
double, double,
......
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/uniform_random_inplace_grad_kernel.h" #include "paddle/phi/kernels/uniform_inplace_grad_kernel.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/full_kernel.h"
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceGradKernel(const Context& ctx, void UniformInplaceGradKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
float min, float min,
float max, float max,
...@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx, ...@@ -36,9 +36,9 @@ void UniformRandomInplaceGradKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_inplace_grad, PD_REGISTER_KERNEL(uniform_inplace_grad,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomInplaceGradKernel, phi::UniformInplaceGradKernel,
float, float,
double) {} double) {}
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/uniform_random_inplace_kernel.h" #include "paddle/phi/kernels/uniform_inplace_kernel.h"
#include <thrust/random.h> #include <thrust/random.h>
...@@ -54,7 +54,7 @@ struct UniformGenerator { ...@@ -54,7 +54,7 @@ struct UniformGenerator {
}; };
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceKernel(const Context& ctx, void UniformInplaceKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
float min, float min,
float max, float max,
...@@ -80,9 +80,9 @@ void UniformRandomInplaceKernel(const Context& ctx, ...@@ -80,9 +80,9 @@ void UniformRandomInplaceKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_inplace, PD_REGISTER_KERNEL(uniform_inplace,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomInplaceKernel, phi::UniformInplaceKernel,
float, float,
double) {} double) {}
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/uniform_random_kernel.h" #include "paddle/phi/kernels/uniform_kernel.h"
#include <thrust/random.h> #include <thrust/random.h>
...@@ -54,7 +54,7 @@ struct UniformGenerator { ...@@ -54,7 +54,7 @@ struct UniformGenerator {
}; };
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx, void UniformRawKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
...@@ -86,10 +86,10 @@ void UniformRandomRawKernel(const Context& dev_ctx, ...@@ -86,10 +86,10 @@ void UniformRandomRawKernel(const Context& dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_raw, PD_REGISTER_KERNEL(uniform_raw,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomRawKernel, phi::UniformRawKernel,
float, float,
double, double,
phi::dtype::float16) {} phi::dtype::float16) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
const std::vector<int64_t>& height_sections,
const std::vector<std::string>& epmap,
const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
DenseTensor* bias_grad);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void HierarchicalSigmoidKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
int num_classes,
bool remote_prefetch,
int trainer_id,
const std::vector<int64_t>& height_sections,
const std::vector<std::string>& epmap,
const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* out,
DenseTensor* pre_out,
DenseTensor* w_out);
} // namespace phi
...@@ -19,24 +19,24 @@ ...@@ -19,24 +19,24 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void Yolov3LossGradKernel(const Context& dev_ctx, void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& gt_box, const DenseTensor& w,
const DenseTensor& gt_label, const DenseTensor& label,
const paddle::optional<DenseTensor>& gt_score, const paddle::optional<DenseTensor>& path,
const DenseTensor& objectness_mask, const paddle::optional<DenseTensor>& code,
const DenseTensor& gt_match_mask, const paddle::optional<DenseTensor>& bias,
const DenseTensor& loss_grad, const DenseTensor& pre_out,
const std::vector<int>& anchors, const DenseTensor& out_grad,
const std::vector<int>& anchor_mask, int num_classes,
int class_num, bool remote_prefetch,
float ignore_thresh, int trainer_id,
int downsample_ratio, const std::vector<int64_t>& height_sections,
bool use_label_smooth, const std::vector<std::string>& epmap,
float scale_x_y, const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* x_grad, DenseTensor* x_grad,
DenseTensor* gt_box_grad, DenseTensor* w_grad,
DenseTensor* gt_label_grad, DenseTensor* bias_grad);
DenseTensor* gt_score_grad);
} // namespace phi } // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void HSigmoidLossKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
int num_classes,
bool remote_prefetch,
int trainer_id,
const std::vector<int64_t>& height_sections,
const std::vector<std::string>& epmap,
const std::vector<std::string>& table_names,
bool is_sparse,
DenseTensor* out,
DenseTensor* pre_out,
DenseTensor* w_out);
} // namespace phi
...@@ -17,12 +17,12 @@ ...@@ -17,12 +17,12 @@
#include "paddle/phi/common/int_array.h" #include "paddle/phi/common/int_array.h"
#include "paddle/phi/kernels/funcs/reduce_functor.h" #include "paddle/phi/kernels/funcs/reduce_functor.h"
#include "paddle/phi/kernels/impl/reduce_grad.h" #include "paddle/phi/kernels/impl/reduce_grad.h"
#include "paddle/phi/kernels/reduce_prod_grad_kernel.h" #include "paddle/phi/kernels/prod_grad_kernel.h"
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void ReduceProdGradKernel(const Context& dev_ctx, void ProdGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& out, const DenseTensor& out,
const DenseTensor& out_grad, const DenseTensor& out_grad,
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include "paddle/phi/kernels/slice_kernel.h" #include "paddle/phi/kernels/slice_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h" #include "paddle/phi/kernels/transpose_kernel.h"
#include "paddle/phi/kernels/triangular_solve_kernel.h" #include "paddle/phi/kernels/triangular_solve_kernel.h"
#include "paddle/phi/kernels/tril_triu_kernel.h" #include "paddle/phi/kernels/tril_kernel.h"
namespace phi { namespace phi {
...@@ -116,8 +116,8 @@ void QrGradKernel(const Context& ctx, ...@@ -116,8 +116,8 @@ void QrGradKernel(const Context& ctx,
DenseTensor M_tmp1 = Subtract<T, Context>(ctx, R_term, Q_term); DenseTensor M_tmp1 = Subtract<T, Context>(ctx, R_term, Q_term);
// Compute M = (tril(M) + tril(M).mH()) * 0.5 Identity // Compute M = (tril(M) + tril(M).mH()) * 0.5 Identity
DenseTensor M_tril_0 = TrilTriu<T, Context>(ctx, M_tmp1, 0, true); DenseTensor M_tril_0 = Tril<T, Context>(ctx, M_tmp1, 0, true);
DenseTensor M_tril_1 = TrilTriu<T, Context>(ctx, M_tmp1, -1, true); DenseTensor M_tril_1 = Tril<T, Context>(ctx, M_tmp1, -1, true);
DenseTensor M = Add<T, Context>( DenseTensor M = Add<T, Context>(
ctx, M_tril_0, TransposeLast2Dim<T, Context>(ctx, M_tril_1)); ctx, M_tril_0, TransposeLast2Dim<T, Context>(ctx, M_tril_1));
......
...@@ -16,12 +16,12 @@ ...@@ -16,12 +16,12 @@
#include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/tril_triu_compute.h" #include "paddle/phi/kernels/funcs/tril_triu_compute.h"
#include "paddle/phi/kernels/tril_triu_grad_kernel.h" #include "paddle/phi/kernels/tril_grad_kernel.h"
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuGradKernel(const Context& ctx, void TrilGradKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int diagonal, int diagonal,
bool lower, bool lower,
......
...@@ -16,12 +16,12 @@ ...@@ -16,12 +16,12 @@
#include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/tril_triu_compute.h" #include "paddle/phi/kernels/funcs/tril_triu_compute.h"
#include "paddle/phi/kernels/tril_triu_kernel.h" #include "paddle/phi/kernels/tril_kernel.h"
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuKernel(const Context& ctx, void TrilKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
int diagonal, int diagonal,
bool lower, bool lower,
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/reduce_prod_kernel.h" #include "paddle/phi/kernels/prod_kernel.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/reduce.h" #include "paddle/phi/kernels/gpu/reduce.h"
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void WhereIndexKernel(const Context& dev_ctx, void NonZeroKernel(const Context& dev_ctx,
const DenseTensor& condition, const DenseTensor& condition,
DenseTensor* out); DenseTensor* out);
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void ReduceProdGradKernel(const Context& dev_ctx, void ProdGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& out, const DenseTensor& out,
const DenseTensor& out_grad, const DenseTensor& out_grad,
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/reduce_prod_kernel.h" #include "paddle/phi/kernels/prod_kernel.h"
#include "paddle/phi/backends/all_context.h" #include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
......
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h" #include "paddle/phi/kernels/selected_rows/hsigmoid_loss_grad_kernel.h"
#include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h" #include "paddle/phi/kernels/cpu/hsigmoid_loss_grad.h"
namespace phi { namespace phi {
namespace sr { namespace sr {
...@@ -36,7 +36,7 @@ static std::vector<int64_t> PathToRows(const DenseTensor& path) { ...@@ -36,7 +36,7 @@ static std::vector<int64_t> PathToRows(const DenseTensor& path) {
} }
template <typename T, typename Context> template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx, void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
...@@ -66,7 +66,7 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -66,7 +66,7 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
phi::DDim temp_dim(w.dims()); phi::DDim temp_dim(w.dims());
temp_dim[0] = real_rows.size(); temp_dim[0] = real_rows.size();
w_grad_value->Resize(temp_dim); w_grad_value->Resize(temp_dim);
phi::HierarchicalSigmoidGradKernelImpl<T>(ctx, phi::HSigmoidLossGradKernelImpl<T>(ctx,
x, x,
w, w,
label, label,
...@@ -91,9 +91,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -91,9 +91,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
} // namespace sr } // namespace sr
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(hierarchical_sigmoid_grad_sr, PD_REGISTER_KERNEL(hsigmoid_loss_grad_sr,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::sr::HierarchicalSigmoidGradKernel, phi::sr::HSigmoidLossGradKernel,
float, float,
double) {} double) {}
...@@ -21,7 +21,7 @@ namespace phi { ...@@ -21,7 +21,7 @@ namespace phi {
namespace sr { namespace sr {
template <typename T, typename Context> template <typename T, typename Context>
void HierarchicalSigmoidGradKernel(const Context& ctx, void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
......
...@@ -12,18 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,18 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/selected_rows/uniform_random_kernel.h" #include "paddle/phi/kernels/selected_rows/uniform_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/uniform_random_kernel.h" #include "paddle/phi/kernels/uniform_kernel.h"
namespace phi { namespace phi {
namespace sr { namespace sr {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx, void UniformRawKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
...@@ -33,7 +33,7 @@ void UniformRandomRawKernel(const Context& dev_ctx, ...@@ -33,7 +33,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
int diag_step, int diag_step,
float diag_val, float diag_val,
SelectedRows* out) { SelectedRows* out) {
phi::UniformRandomRawKernel<T>(dev_ctx, phi::UniformRawKernel<T>(dev_ctx,
shape, shape,
dtype, dtype,
min, min,
...@@ -46,61 +46,51 @@ void UniformRandomRawKernel(const Context& dev_ctx, ...@@ -46,61 +46,51 @@ void UniformRandomRawKernel(const Context& dev_ctx,
} }
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx, void UniformKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
const Scalar& max, const Scalar& max,
int seed, int seed,
SelectedRows* out) { SelectedRows* out) {
phi::UniformRandomKernel<T>( phi::UniformKernel<T>(
dev_ctx, shape, dtype, min, max, seed, out->mutable_value()); dev_ctx, shape, dtype, min, max, seed, out->mutable_value());
} }
} // namespace sr } // namespace sr
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random_raw_sr, PD_REGISTER_KERNEL(uniform_raw_sr,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::sr::UniformRandomRawKernel, phi::sr::UniformRawKernel,
float, float,
double, double,
phi::dtype::bfloat16) {} phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(uniform_random_sr, PD_REGISTER_KERNEL(uniform_sr,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::sr::UniformRandomKernel, phi::sr::UniformKernel,
float, float,
double, double,
phi::dtype::bfloat16) {} phi::dtype::bfloat16) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_REGISTER_KERNEL(uniform_random_raw_sr, PD_REGISTER_KERNEL(
GPU, uniform_raw_sr, GPU, ALL_LAYOUT, phi::sr::UniformRawKernel, float, double) {
ALL_LAYOUT, }
phi::sr::UniformRandomRawKernel,
float,
double) {}
PD_REGISTER_KERNEL(uniform_random_sr, PD_REGISTER_KERNEL(
GPU, uniform_sr, GPU, ALL_LAYOUT, phi::sr::UniformKernel, float, double) {}
ALL_LAYOUT,
phi::sr::UniformRandomKernel,
float,
double) {}
#endif #endif
#if defined(PADDLE_WITH_XPU) #if defined(PADDLE_WITH_XPU)
PD_REGISTER_KERNEL(uniform_random_raw_sr,
XPU,
ALL_LAYOUT,
phi::sr::UniformRandomRawKernel,
float) {}
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(
uniform_random_sr, XPU, ALL_LAYOUT, phi::sr::UniformRandomKernel, float) {} uniform_raw_sr, XPU, ALL_LAYOUT, phi::sr::UniformRawKernel, float) {}
PD_REGISTER_KERNEL(uniform_sr, XPU, ALL_LAYOUT, phi::sr::UniformKernel, float) {
}
#endif #endif
...@@ -22,7 +22,7 @@ namespace phi { ...@@ -22,7 +22,7 @@ namespace phi {
namespace sr { namespace sr {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx, void UniformRawKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
...@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx, ...@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
SelectedRows* out); SelectedRows* out);
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx, void UniformKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuGradKernel(const Context& ctx, void TrilGradKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int diagonal, int diagonal,
bool lower, bool lower,
......
...@@ -20,21 +20,21 @@ ...@@ -20,21 +20,21 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuKernel(const Context& ctx, void TrilKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
int diagonal, int diagonal,
bool lower, bool lower,
DenseTensor* out); DenseTensor* out);
template <typename T, typename Context> template <typename T, typename Context>
DenseTensor TrilTriu(const Context& ctx, DenseTensor Tril(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
int diagonal, int diagonal,
bool lower) { bool lower) {
DenseTensor dense_out; DenseTensor dense_out;
MetaTensor meta_out(&dense_out); MetaTensor meta_out(&dense_out);
TrilTriuInferMeta(x, diagonal, lower, &meta_out); TrilInferMeta(x, diagonal, lower, &meta_out);
TrilTriuKernel<T, Context>(ctx, x, diagonal, lower, &dense_out); TrilKernel<T, Context>(ctx, x, diagonal, lower, &dense_out);
return dense_out; return dense_out;
} }
......
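The Tril wrapper declared above is what QrGradKernel and LstsqKernel in this change call to take a triangular part inline. A minimal usage sketch (the helper `LowerTriangle` and the float/CPU instantiation are assumptions for illustration):

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/tril_kernel.h"

// Keeps the lower triangle of `x` (main diagonal included); the wrapper runs
// TrilInferMeta and TrilKernel and returns the resulting tensor by value.
phi::DenseTensor LowerTriangle(const phi::CPUContext& ctx,
                               const phi::DenseTensor& x) {
  return phi::Tril<float, phi::CPUContext>(ctx, x, /*diagonal=*/0, /*lower=*/true);
}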
...@@ -19,14 +19,14 @@ limitations under the License. */ ...@@ -19,14 +19,14 @@ limitations under the License. */
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceKernel(const Context& ctx, void UniformInplaceGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& out_grad,
float min, float min,
float max, float max,
int seed, int seed,
int diag_num, int diag_num,
int diag_step, int diag_step,
float diag_val, float diag_val,
DenseTensor* out); DenseTensor* x_grad);
} // namespace phi } // namespace phi
...@@ -19,14 +19,14 @@ limitations under the License. */ ...@@ -19,14 +19,14 @@ limitations under the License. */
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomInplaceGradKernel(const Context& ctx, void UniformInplaceKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& x,
float min, float min,
float max, float max,
int seed, int seed,
int diag_num, int diag_num,
int diag_step, int diag_step,
float diag_val, float diag_val,
DenseTensor* x_grad); DenseTensor* out);
} // namespace phi } // namespace phi
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/uniform_random_kernel.h" #include "paddle/phi/kernels/uniform_kernel.h"
#include "paddle/phi/common/int_array.h" #include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h" #include "paddle/phi/common/scalar.h"
...@@ -29,38 +29,36 @@ ...@@ -29,38 +29,36 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx, void UniformKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
const Scalar& max, const Scalar& max,
int seed, int seed,
DenseTensor* out) { DenseTensor* out) {
UniformRandomRawKernel<T>( UniformRawKernel<T>(dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
dev_ctx, shape, dtype, min, max, seed, 0, 0, 0.0f, out);
} }
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL(uniform_random, PD_REGISTER_KERNEL(uniform,
CPU, CPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomKernel, phi::UniformKernel,
float, float,
double, double,
phi::dtype::bfloat16) {} phi::dtype::bfloat16) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_REGISTER_KERNEL(uniform_random, PD_REGISTER_KERNEL(uniform,
GPU, GPU,
ALL_LAYOUT, ALL_LAYOUT,
phi::UniformRandomKernel, phi::UniformKernel,
float, float,
double, double,
phi::dtype::float16) {} phi::dtype::float16) {}
#endif #endif
#ifdef PADDLE_WITH_XPU #ifdef PADDLE_WITH_XPU
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(uniform, XPU, ALL_LAYOUT, phi::UniformKernel, float) {}
uniform_random, XPU, ALL_LAYOUT, phi::UniformRandomKernel, float) {}
#endif #endif
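As the forwarding above shows, UniformKernel is UniformRawKernel with the diagonal attributes fixed to zero. A minimal call sketch under that reading (the helper `FillUniform` and the concrete shape and range are illustrative):

#include <cstdint>
#include <vector>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/uniform_kernel.h"

// Fills `out` as a 2x3 float tensor with samples drawn uniformly from [-1, 1).
void FillUniform(const phi::CPUContext& dev_ctx, phi::DenseTensor* out) {
  phi::UniformKernel<float>(dev_ctx,
                            phi::IntArray(std::vector<int64_t>{2, 3}),
                            phi::DataType::FLOAT32,
                            phi::Scalar(-1.0f),
                            phi::Scalar(1.0f),
                            /*seed=*/0,
                            out);
}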
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context& dev_ctx, void UniformRawKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
...@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx, ...@@ -34,7 +34,7 @@ void UniformRandomRawKernel(const Context& dev_ctx,
DenseTensor* out); DenseTensor* out);
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomKernel(const Context& dev_ctx, void UniformKernel(const Context& dev_ctx,
const IntArray& shape, const IntArray& shape,
DataType dtype, DataType dtype,
const Scalar& min, const Scalar& min,
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/where_index_kernel.h" #include "paddle/phi/kernels/nonzero_kernel.h"
#include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/xpu/xpu_header.h" #include "paddle/fluid/platform/device/xpu/xpu_header.h"
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void WhereIndexKernel(const Context& dev_ctx, void NonZeroKernel(const Context& dev_ctx,
const DenseTensor& condition, const DenseTensor& condition,
DenseTensor* out) { DenseTensor* out) {
const T* cond_data = condition.data<T>(); const T* cond_data = condition.data<T>();
...@@ -69,4 +69,4 @@ void WhereIndexKernel(const Context& dev_ctx, ...@@ -69,4 +69,4 @@ void WhereIndexKernel(const Context& dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(
where_index, XPU, ALL_LAYOUT, phi::WhereIndexKernel, int, bool, float) {} nonzero, XPU, ALL_LAYOUT, phi::NonZeroKernel, int, bool, float) {}
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/reduce_prod_kernel.h" #include "paddle/phi/kernels/prod_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h" #include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/backends/xpu/xpu_context.h" #include "paddle/phi/backends/xpu/xpu_context.h"
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/tril_triu_grad_kernel.h" #include "paddle/phi/kernels/tril_grad_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h" #include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuGradKernel(const Context& ctx, void TrilGradKernel(const Context& ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int diagonal, int diagonal,
bool lower, bool lower,
...@@ -49,4 +49,4 @@ void TrilTriuGradKernel(const Context& ctx, ...@@ -49,4 +49,4 @@ void TrilTriuGradKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(
tril_triu_grad, XPU, ALL_LAYOUT, phi::TrilTriuGradKernel, int, float) {} tril_grad, XPU, ALL_LAYOUT, phi::TrilGradKernel, int, float) {}
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/phi/kernels/tril_triu_kernel.h" #include "paddle/phi/kernels/tril_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h" #include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/kernel_registry.h"
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TrilTriuKernel(const Context& ctx, void TrilKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
int diagonal, int diagonal,
bool lower, bool lower,
...@@ -48,5 +48,4 @@ void TrilTriuKernel(const Context& ctx, ...@@ -48,5 +48,4 @@ void TrilTriuKernel(const Context& ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(tril, XPU, ALL_LAYOUT, phi::TrilKernel, int, float) {}
tril_triu, XPU, ALL_LAYOUT, phi::TrilTriuKernel, int, float) {}
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/uniform_random_kernel.h" #include "paddle/phi/kernels/uniform_kernel.h"
#include <string> #include <string>
...@@ -24,7 +24,7 @@ limitations under the License. */ ...@@ -24,7 +24,7 @@ limitations under the License. */
namespace phi { namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void UniformRandomRawKernel(const Context &dev_ctx, void UniformRawKernel(const Context &dev_ctx,
const IntArray &shape, const IntArray &shape,
DataType dtype, DataType dtype,
const Scalar &min, const Scalar &min,
...@@ -76,5 +76,5 @@ void UniformRandomRawKernel(const Context &dev_ctx, ...@@ -76,5 +76,5 @@ void UniformRandomRawKernel(const Context &dev_ctx,
} // namespace phi } // namespace phi
PD_REGISTER_KERNEL( PD_REGISTER_KERNEL(uniform_raw, XPU, ALL_LAYOUT, phi::UniformRawKernel, float) {
uniform_random_raw, XPU, ALL_LAYOUT, phi::UniformRandomRawKernel, float) {} }
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void YoloLossGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
const paddle::optional<DenseTensor>& gt_score,
const DenseTensor& objectness_mask,
const DenseTensor& gt_match_mask,
const DenseTensor& loss_grad,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
float ignore_thresh,
int downsample_ratio,
bool use_label_smooth,
float scale_x_y,
DenseTensor* x_grad,
DenseTensor* gt_box_grad,
DenseTensor* gt_label_grad,
DenseTensor* gt_score_grad);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void YoloLossKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
const paddle::optional<DenseTensor>& gt_score,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
float ignore_thresh,
int downsample_ratio,
bool use_label_smooth,
float scale_x_y,
DenseTensor* loss,
DenseTensor* objectness_mask,
DenseTensor* gt_match_mask);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void Yolov3LossKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
const paddle::optional<DenseTensor>& gt_score,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
float ignore_thresh,
int downsample_ratio,
bool use_label_smooth,
float scale_x_y,
DenseTensor* loss,
DenseTensor* objectness_mask,
DenseTensor* gt_match_mask);
} // namespace phi
...@@ -18,7 +18,7 @@ namespace phi { ...@@ -18,7 +18,7 @@ namespace phi {
KernelSignature HierarchicalSigmoidOpArgumentMapping( KernelSignature HierarchicalSigmoidOpArgumentMapping(
const ArgumentMappingContext& ctx) { const ArgumentMappingContext& ctx) {
return KernelSignature("hierarchical_sigmoid", return KernelSignature("hsigmoid_loss",
{"X", "W", "Label", "PathTable", "PathCode", "Bias"}, {"X", "W", "Label", "PathTable", "PathCode", "Bias"},
{"num_classes", {"num_classes",
"remote_prefetch", "remote_prefetch",
...@@ -33,7 +33,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping( ...@@ -33,7 +33,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping(
KernelSignature HierarchicalSigmoidGradOpArgumentMapping( KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
const ArgumentMappingContext& ctx) { const ArgumentMappingContext& ctx) {
if (ctx.IsDenseTensorOutput("W@GRAD")) { if (ctx.IsDenseTensorOutput("W@GRAD")) {
return KernelSignature("hierarchical_sigmoid_grad", return KernelSignature("hsigmoid_loss_grad",
{"X", {"X",
"W", "W",
"Label", "Label",
...@@ -51,7 +51,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping( ...@@ -51,7 +51,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
"is_sparse"}, "is_sparse"},
{"X@GRAD", "W@GRAD", "Bias@GRAD"}); {"X@GRAD", "W@GRAD", "Bias@GRAD"});
} else if (ctx.IsSelectedRowsOutput("W@GRAD")) { } else if (ctx.IsSelectedRowsOutput("W@GRAD")) {
return KernelSignature("hierarchical_sigmoid_grad_sr", return KernelSignature("hsigmoid_loss_grad_sr",
{"X", {"X",
"W", "W",
"Label", "Label",
...@@ -75,6 +75,9 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping( ...@@ -75,6 +75,9 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
} // namespace phi } // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid, hsigmoid_loss);
PD_REGISTER_BASE_KERNEL_NAME(hierarchical_sigmoid_grad, hsigmoid_loss_grad);
PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid, PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid,
phi::HierarchicalSigmoidOpArgumentMapping); phi::HierarchicalSigmoidOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid_grad, PD_REGISTER_ARG_MAPPING_FN(hierarchical_sigmoid_grad,
......
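A compact sketch of the routing this mapping encodes (the decision is made by the argument-mapping function above, not by user code; the kernel names come from the registrations in this change):

// W@GRAD is a DenseTensor  -> "hsigmoid_loss_grad"     (phi::HSigmoidLossGradKernel)
// W@GRAD is SelectedRows   -> "hsigmoid_loss_grad_sr"  (phi::sr::HSigmoidLossGradKernel)
// PD_REGISTER_BASE_KERNEL_NAME maps the legacy op names (hierarchical_sigmoid,
// hierarchical_sigmoid_grad) onto the new hsigmoid_loss / hsigmoid_loss_grad kernels.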
...@@ -17,16 +17,19 @@ limitations under the License. */ ...@@ -17,16 +17,19 @@ limitations under the License. */
namespace phi { namespace phi {
KernelSignature TrilTriuOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature TrilTriuOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("tril_triu", {"X"}, {"diagonal", "lower"}, {"Out"}); return KernelSignature("tril", {"X"}, {"diagonal", "lower"}, {"Out"});
} }
KernelSignature TrilTriuGradOpArgumentMapping( KernelSignature TrilTriuGradOpArgumentMapping(
const ArgumentMappingContext& ctx) { const ArgumentMappingContext& ctx) {
return KernelSignature( return KernelSignature(
"tril_triu_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"}); "tril_grad", {"Out@GRAD"}, {"diagonal", "lower"}, {"X@GRAD"});
} }
} // namespace phi } // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(tril_triu, tril);
PD_REGISTER_BASE_KERNEL_NAME(tril_triu_grad, tril_grad);
PD_REGISTER_ARG_MAPPING_FN(tril_triu, phi::TrilTriuOpArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(tril_triu, phi::TrilTriuOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(tril_triu_grad, phi::TrilTriuGradOpArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(tril_triu_grad, phi::TrilTriuGradOpArgumentMapping);
...@@ -18,7 +18,7 @@ namespace phi { ...@@ -18,7 +18,7 @@ namespace phi {
KernelSignature UniformRandomInplaceOpArgumentMapping( KernelSignature UniformRandomInplaceOpArgumentMapping(
const ArgumentMappingContext& ctx) { const ArgumentMappingContext& ctx) {
return KernelSignature( return KernelSignature(
"uniform_random_inplace", "uniform_inplace",
{"X"}, {"X"},
{"min", "max", "seed", "diag_num", "diag_step", "diag_val"}, {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
{"Out"}); {"Out"});
...@@ -27,7 +27,7 @@ KernelSignature UniformRandomInplaceOpArgumentMapping( ...@@ -27,7 +27,7 @@ KernelSignature UniformRandomInplaceOpArgumentMapping(
KernelSignature UniformRandomInplaceGradOpArgumentMapping( KernelSignature UniformRandomInplaceGradOpArgumentMapping(
const ArgumentMappingContext& ctx) { const ArgumentMappingContext& ctx) {
return KernelSignature( return KernelSignature(
"uniform_random_inplace_grad", "uniform_inplace_grad",
{"Out@GRAD"}, {"Out@GRAD"},
{"min", "max", "seed", "diag_num", "diag_step", "diag_val"}, {"min", "max", "seed", "diag_num", "diag_step", "diag_val"},
{"X@GRAD"}); {"X@GRAD"});
...@@ -35,6 +35,8 @@ KernelSignature UniformRandomInplaceGradOpArgumentMapping( ...@@ -35,6 +35,8 @@ KernelSignature UniformRandomInplaceGradOpArgumentMapping(
} // namespace phi } // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(uniform_random_inplace, uniform_inplace);
PD_REGISTER_ARG_MAPPING_FN(uniform_random_inplace, PD_REGISTER_ARG_MAPPING_FN(uniform_random_inplace,
phi::UniformRandomInplaceOpArgumentMapping); phi::UniformRandomInplaceOpArgumentMapping);
......
...@@ -22,7 +22,7 @@ KernelSignature UniformRandomOpArgumentMapping(
  if (ctx.IsDenseTensorOutput("Out")) {
    if (diag_num) {
      if (ctx.InputSize("ShapeTensorList") > 0) {
-        return KernelSignature("uniform_random_raw",
+        return KernelSignature("uniform_raw",
                               {},
                               {"ShapeTensorList",
                                "dtype",
...@@ -37,7 +37,7 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_raw",
+          return KernelSignature("uniform_raw",
                                 {},
                                 {"ShapeTensor",
                                  "dtype",
...@@ -49,7 +49,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                  "diag_val"},
                                 {"Out"});
        } else {
-          return KernelSignature("uniform_random_raw",
+          return KernelSignature("uniform_raw",
                                 {},
                                 {"shape",
                                  "dtype",
...@@ -65,7 +65,7 @@ KernelSignature UniformRandomOpArgumentMapping(
    } else {
      if (ctx.InputSize("ShapeTensorList") > 0) {
        return KernelSignature(
-            "uniform_random",
+            "uniform",
            {},
            {"ShapeTensorList", "dtype", "min", "max", "seed"},
            {"Out"});
...@@ -73,22 +73,20 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random",
+          return KernelSignature("uniform",
                                 {},
                                 {"ShapeTensor", "dtype", "min", "max", "seed"},
                                 {"Out"});
        } else {
-          return KernelSignature("uniform_random",
-                                 {},
-                                 {"shape", "dtype", "min", "max", "seed"},
-                                 {"Out"});
+          return KernelSignature(
+              "uniform", {}, {"shape", "dtype", "min", "max", "seed"}, {"Out"});
        }
      }
    }
  } else if (ctx.IsSelectedRowsOutput("Out")) {
    if (diag_num) {
      if (ctx.InputSize("ShapeTensorList") > 0) {
-        return KernelSignature("uniform_random_raw_sr",
+        return KernelSignature("uniform_raw_sr",
                               {},
                               {"ShapeTensorList",
                                "dtype",
...@@ -103,7 +101,7 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_raw_sr",
+          return KernelSignature("uniform_raw_sr",
                                 {},
                                 {"ShapeTensor",
                                  "dtype",
...@@ -115,7 +113,7 @@ KernelSignature UniformRandomOpArgumentMapping(
                                  "diag_val"},
                                 {"Out"});
        } else {
-          return KernelSignature("uniform_random_raw_sr",
+          return KernelSignature("uniform_raw_sr",
                                 {},
                                 {"shape",
                                  "dtype",
...@@ -131,7 +129,7 @@ KernelSignature UniformRandomOpArgumentMapping(
    } else {
      if (ctx.InputSize("ShapeTensorList") > 0) {
        return KernelSignature(
-            "uniform_random_sr",
+            "uniform_sr",
            {},
            {"ShapeTensorList", "dtype", "min", "max", "seed"},
            {"Out"});
...@@ -139,12 +137,12 @@ KernelSignature UniformRandomOpArgumentMapping(
        const auto& shape =
            paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
        if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-          return KernelSignature("uniform_random_sr",
+          return KernelSignature("uniform_sr",
                                 {},
                                 {"ShapeTensor", "dtype", "min", "max", "seed"},
                                 {"Out"});
        } else {
-          return KernelSignature("uniform_random_sr",
+          return KernelSignature("uniform_sr",
                                 {},
                                 {"shape", "dtype", "min", "max", "seed"},
                                 {"Out"});
...@@ -156,4 +154,6 @@ KernelSignature UniformRandomOpArgumentMapping(
}
} // namespace phi
+PD_REGISTER_BASE_KERNEL_NAME(uniform_random, uniform);
PD_REGISTER_ARG_MAPPING_FN(uniform_random, phi::UniformRandomOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature WhereIndexOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("nonzero", {"Condition"}, {}, {"Out"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(where_index, nonzero);
PD_REGISTER_ARG_MAPPING_FN(where_index, phi::WhereIndexOpArgumentMapping);
...@@ -17,7 +17,7 @@
namespace phi {
KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("yolov3_loss",
+  return KernelSignature("yolo_loss",
                         {"X", "GTBox", "GTLabel", "GTScore"},
                         {"anchors",
                          "anchor_mask",
...@@ -32,7 +32,7 @@ KernelSignature Yolov3LossOpArgumentMapping(const ArgumentMappingContext& ctx) {
KernelSignature Yolov3LossGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature(
-      "yolov3_loss_grad",
+      "yolo_loss_grad",
      {"X",
       "GTBox",
       "GTLabel",
...@@ -51,6 +51,9 @@ KernelSignature Yolov3LossGradOpArgumentMapping(
}
} // namespace phi
+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss, yolo_loss);
+PD_REGISTER_BASE_KERNEL_NAME(yolov3_loss_grad, yolo_loss_grad);
PD_REGISTER_ARG_MAPPING_FN(yolov3_loss, phi::Yolov3LossOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(yolov3_loss_grad,
                           phi::Yolov3LossGradOpArgumentMapping);
...@@ -309,7 +309,7 @@ class UniformInitializer(Initializer):
        if framework._non_static_mode():
            if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                    var.shape,
                    out_dtype,
                    self._low,
...@@ -711,7 +711,7 @@ class XavierInitializer(Initializer):
        if self._uniform:
            limit = math.sqrt(6.0 / float(fan_in + fan_out))
            if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                    out_var.shape,
                    out_dtype,
                    -limit,
...@@ -923,7 +923,7 @@ class MSRAInitializer(Initializer):
            gain = calculate_gain(self._nonlinearity, self._negative_slope)
            limit = gain * math.sqrt(3.0 / float(fan_in))
            if in_dygraph_mode():
-                out_var = _C_ops.uniform_random(
+                out_var = _C_ops.uniform(
                    var.shape,
                    out_dtype,
                    -limit,
......
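These initializer changes only swap the dygraph C-op name; the public initializer classes keep their interfaces. A minimal sketch of a call path that exercises the Xavier branch above (assuming Paddle 2.x eager mode; the layer sizes are arbitrary):

```python
import paddle

# XavierInitializer with uniform sampling draws its weights through the
# renamed uniform C-op when running in eager (dygraph) mode.
weight_attr = paddle.ParamAttr(
    initializer=paddle.nn.initializer.XavierUniform())
linear = paddle.nn.Linear(4, 8, weight_attr=weight_attr)
print(linear.weight.shape)  # [4, 8]
```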
...@@ -5385,7 +5385,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
            )
        )
    if in_dygraph_mode():
-        return _C_ops.reduce_prod(
+        return _C_ops.prod(
            input,
            dim if dim != None and dim != [] else [0],
            keep_dim,
...@@ -15548,7 +15548,7 @@ def where(condition):
    """
    if in_dygraph_mode():
-        return _C_ops.where_index(condition)
+        return _C_ops.nonzero(condition)
    if _in_legacy_dygraph():
        return _legacy_C_ops.where_index(condition)
...@@ -16567,7 +16567,7 @@ def uniform_random(
    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
            shape,
            dtype,
            float(min),
......
...@@ -1017,7 +1017,7 @@ def hsigmoid_loss(
    # [1.92374969]]
    """
    if in_dygraph_mode():
-        out, _, _ = _C_ops.hierarchical_sigmoid(
+        out, _, _ = _C_ops.hsigmoid_loss(
            input,
            weight,
            label,
......
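The dygraph branch of `hsigmoid_loss` now calls `_C_ops.hsigmoid_loss`; its Python signature is unchanged. A rough usage sketch under the default-tree convention (the shapes follow my reading of the documented API and may need adjusting):

```python
import paddle
import paddle.nn.functional as F

paddle.set_device('cpu')
batch, feature, num_classes = 4, 8, 6
x = paddle.randn([batch, feature])
label = paddle.randint(0, num_classes, [batch], dtype='int64')
# With the default tree, the weight holds one row per internal node.
weight = paddle.randn([num_classes - 1, feature])
out = F.hsigmoid_loss(x, label, num_classes, weight)
print(out.shape)  # expected [4, 1]
```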
...@@ -1097,7 +1097,7 @@ def tril(x, diagonal=0, name=None):
    # [9 , 10, 0 , 0 ]])
    """
    if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, True)
+        return _C_ops.tril(x, diagonal, True)
    if _in_legacy_dygraph():
        op = getattr(_legacy_C_ops, 'tril_triu')
...@@ -1163,7 +1163,7 @@ def triu(x, diagonal=0, name=None):
    """
    if in_dygraph_mode():
-        return _C_ops.tril_triu(x, diagonal, False)
+        return _C_ops.tril(x, diagonal, False)
    if _in_legacy_dygraph():
        op = getattr(_legacy_C_ops, 'tril_triu')
......
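As the diff shows, `paddle.tril` and `paddle.triu` share one kernel (now named `tril`) and differ only in the boolean `lower` flag. A quick sketch of the unchanged public API:

```python
import paddle

x = paddle.arange(1, 13, dtype='int64').reshape([3, 4])
lower = paddle.tril(x)              # zeros out elements above the main diagonal
upper = paddle.triu(x, diagonal=1)  # zeros out the main diagonal and below
print(lower.numpy())
print(upper.numpy())
```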
...@@ -3706,7 +3706,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
        dim = [0]
    if in_dygraph_mode():
-        return _C_ops.reduce_prod(x, dim, keepdim, reduce_all)
+        return _C_ops.prod(x, dim, keepdim, reduce_all)
    if _in_legacy_dygraph():
        return _legacy_C_ops.reduce_prod(
            x, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', reduce_all
......
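`paddle.prod` keeps its signature; only the eager-mode call switches from `reduce_prod` to `prod`. For example:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0],
                      [4.0, 5.0, 6.0]])
print(paddle.prod(x))                        # 720., product over all elements
print(paddle.prod(x, axis=1, keepdim=True))  # [[6.], [120.]]
```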
...@@ -584,7 +584,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
-        return _C_ops.uniform_random(
+        return _C_ops.uniform(
            shape,
            dtype,
            float(min),
...@@ -664,7 +664,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
    # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
    """
    if in_dygraph_mode():
-        return _C_ops.uniform_random_inplace_(x, min, max, seed, 0, 0, 1.0)
+        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
    else:
        return _legacy_C_ops.uniform_random_inplace_(
            x, 'min', min, 'max', max, 'seed', seed
......
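The user-facing `paddle.uniform` and the in-place `Tensor.uniform_` are untouched by the rename. A minimal sketch (eager mode; outputs are random):

```python
import paddle

paddle.seed(2022)
x = paddle.uniform(shape=[2, 3], dtype='float32', min=-1.0, max=1.0)
y = paddle.zeros([2, 3])
y.uniform_(min=0.0, max=1.0)  # fills y in place via the renamed inplace kernel
print(x.numpy())
print(y.numpy())
```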
...@@ -429,7 +429,7 @@ def nonzero(x, as_tuple=False):
    rank = len(shape)
    if in_dygraph_mode():
-        outs = _C_ops.where_index(x)
+        outs = _C_ops.nonzero(x)
    elif paddle.in_dynamic_mode():
        outs = _legacy_C_ops.where_index(x)
    else:
......
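`paddle.nonzero` behaves as before; in eager mode it now dispatches straight to the `nonzero` C-op. For instance:

```python
import paddle

x = paddle.to_tensor([[1.0, 0.0, 0.0],
                      [0.0, 2.0, 0.0],
                      [0.0, 0.0, 3.0]])
print(paddle.nonzero(x))                       # coordinates of non-zero entries
rows, cols = paddle.nonzero(x, as_tuple=True)  # one index tensor per dimension
```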
...@@ -197,7 +197,7 @@ def yolo_loss(
    """
    if in_dygraph_mode():
-        loss, _, _ = _C_ops.yolov3_loss(
+        loss, _, _ = _C_ops.yolo_loss(
            x,
            gt_box,
            gt_label,
......
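`paddle.vision.ops.yolo_loss` is unchanged at the Python level; only the underlying C-op name becomes `yolo_loss`. A rough sketch with toy shapes (the channel count must equal `len(anchor_mask) * (5 + class_num)`; all values here are illustrative):

```python
import paddle

x = paddle.rand([2, 14, 8, 8], dtype='float32')    # 2 * (5 + 2) = 14 channels
gt_box = paddle.rand([2, 10, 4], dtype='float32')  # normalized [x, y, w, h]
gt_label = paddle.zeros([2, 10], dtype='int32')    # class ids in [0, class_num)
loss = paddle.vision.ops.yolo_loss(
    x,
    gt_box=gt_box,
    gt_label=gt_label,
    anchors=[10, 13, 16, 30],
    anchor_mask=[0, 1],
    class_num=2,
    ignore_thresh=0.7,
    downsample_ratio=8)
print(loss.shape)  # one loss value per sample
```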