Unverified commit 3951c40d, authored by zhangyuqin1998, committed by GitHub

delete remote_prefetch (#52748)

Parent f03dcff7
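For orientation before the diff: this commit removes the unused `remote_prefetch` attribute from the hierarchical-sigmoid (`hsigmoid_loss`) operator end to end — the C++ OpMaker, the op YAML definitions, the infer-meta and kernel signatures, the phi argument mappings, and the Python API. Below is a minimal sketch of calling the public Python API after the change; the tensor shapes and example values are illustrative assumptions based on the released `paddle.nn.functional.hsigmoid_loss` signature, not part of this diff.

```python
import paddle
import paddle.nn.functional as F

# Illustrative sizes only: 4 samples, 16 features, 5 classes.
x = paddle.randn([4, 16])
label = paddle.randint(0, 5, shape=[4, 1], dtype='int64')
weight = paddle.randn([4, 16])   # shape: [num_classes - 1, feature_size]
bias = paddle.randn([4, 1])      # shape: [num_classes - 1, 1]

# After this commit the op only carries num_classes and is_sparse;
# there is no remote_prefetch attribute or keyword anywhere.
loss = F.hsigmoid_loss(x, label, num_classes=5, weight=weight, bias=bias,
                       is_sparse=False)
print(loss.shape)  # [4, 1]
```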
@@ -124,7 +124,6 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 AddAttr<AttrType>("num_classes", "(int, optional), The number of classes")
 .SetDefault(2);
 // for parameter prefetch
-AddAttr<bool>("remote_prefetch", "").SetDefault(false);
 AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0);
 AddAttr<std::vector<int64_t>>("height_sections",
 "Height for each output SelectedRows.")
......
@@ -475,8 +475,8 @@
     func : heaviside_grad
 - backward_op : hsigmoid_loss_grad
-  forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
+  forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
-  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, bool is_sparse)
+  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool is_sparse)
   output : Tensor(x_grad), Tensor(w_grad), Tensor(bias_grad)
   infer_meta :
     func : GeneralTernaryGradInferMeta
......
@@ -684,7 +684,7 @@
   backward : heaviside_grad
 - op : hsigmoid_loss
-  args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse)
+  args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse)
   output : Tensor(out), Tensor(pre_out), Tensor(w_out)
   infer_meta :
     func : HSigmoidLossInferMeta
......
@@ -1432,7 +1432,6 @@ void HSigmoidLossInferMeta(const MetaTensor& x,
 const MetaTensor& path,
 const MetaTensor& code,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 MetaTensor* out,
 MetaTensor* pre_out,
......
@@ -312,7 +312,6 @@ void HSigmoidLossInferMeta(const MetaTensor& x,
 const MetaTensor& path,
 const MetaTensor& code,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 MetaTensor* out,
 MetaTensor* pre_out,
......
@@ -35,7 +35,6 @@ void HSigmoidLossGradKernelImpl(const Context& ctx,
 const DenseTensor& pre_out,
 const DenseTensor& out_grad,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* x_grad,
 DenseTensor* w_grad,
......
@@ -31,7 +31,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 const DenseTensor& pre_out,
 const DenseTensor& out_grad,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* x_grad,
 DenseTensor* w_grad,
@@ -46,7 +45,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 pre_out,
 out_grad,
 num_classes,
-remote_prefetch,
 is_sparse,
 x_grad,
 w_grad,
......
@@ -34,7 +34,6 @@ void HSigmoidLossKernel(const Context& ctx,
 const paddle::optional<DenseTensor>& path,
 const paddle::optional<DenseTensor>& code,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* out,
 DenseTensor* pre_out,
......
@@ -29,7 +29,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 const DenseTensor& pre_out,
 const DenseTensor& out_grad,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* x_grad,
 DenseTensor* w_grad,
......
@@ -27,7 +27,6 @@ void HSigmoidLossKernel(const Context& ctx,
 const paddle::optional<DenseTensor>& path,
 const paddle::optional<DenseTensor>& code,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* out,
 DenseTensor* pre_out,
......
@@ -48,7 +48,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 const DenseTensor& pre_out,
 const DenseTensor& out_grad,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* x_grad,
 SelectedRows* w_grad,
@@ -74,7 +73,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 pre_out,
 out_grad,
 num_classes,
-remote_prefetch,
 is_sparse,
 x_grad,
 w_grad_value,
......
@@ -31,7 +31,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
 const DenseTensor& pre_out,
 const DenseTensor& out_grad,
 int num_classes,
-bool remote_prefetch,
 bool is_sparse,
 DenseTensor* x_grad,
 SelectedRows* w_grad,
......
@@ -20,7 +20,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping(
 const ArgumentMappingContext& ctx) {
 return KernelSignature("hsigmoid_loss",
 {"X", "Label", "W", "Bias", "PathTable", "PathCode"},
-{"num_classes", "remote_prefetch", "is_sparse"},
+{"num_classes", "is_sparse"},
 {"Out", "PreOut", "W_Out"});
 }
@@ -36,7 +36,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
 "Bias",
 "PreOut",
 "Out@GRAD"},
-{"num_classes", "remote_prefetch", "is_sparse"},
+{"num_classes", "is_sparse"},
 {"X@GRAD", "W@GRAD", "Bias@GRAD"});
 } else if (ctx.IsSelectedRowsOutput("W@GRAD")) {
 return KernelSignature("hsigmoid_loss_grad_sr",
@@ -48,7 +48,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
 "Bias",
 "PreOut",
 "Out@GRAD"},
-{"num_classes", "remote_prefetch", "is_sparse"},
+{"num_classes", "is_sparse"},
 {"X@GRAD", "W@GRAD", "Bias@GRAD"});
 } else {
 return KernelSignature("unregistered", {}, {}, {});
......
@@ -177,7 +177,6 @@ def python_api(
     path_code=None,
     num_classes=-1,
     is_sparse=False,
-    remote_prefetch=False,
 ):
     return paddle.nn.functional.hsigmoid_loss(
         input,
......
@@ -1016,7 +1016,6 @@ def hsigmoid_loss(
     attrs = {
         "num_classes": num_classes,
         "is_sparse": is_sparse,
-        "remote_prefetch": is_sparse,
     }
     inputs = {
......
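The final hunk keeps the static-graph branch of `paddle.nn.functional.hsigmoid_loss` in step with the op definition: the attribute dictionary passed to the op no longer sets `remote_prefetch` (which previously just mirrored `is_sparse`). A minimal sketch of the resulting dictionary, with example values and assuming the surrounding helper code is otherwise unchanged:

```python
num_classes, is_sparse = 5, False  # example values

# Only the two surviving attributes are forwarded to the op;
# "remote_prefetch" (formerly a copy of is_sparse) is gone.
attrs = {
    "num_classes": num_classes,
    "is_sparse": is_sparse,
}
print(attrs)  # {'num_classes': 5, 'is_sparse': False}
```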