Unverified commit 3951c40d, authored by zhangyuqin1998, committed via GitHub

Remove the unused `remote_prefetch` attribute from the hierarchical-sigmoid (hsigmoid_loss) operator, kernels, infer-meta, argument mappings, and Python API (#52748)

Parent commit: f03dcff7
......@@ -124,7 +124,6 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<AttrType>("num_classes", "(int, optional), The number of classes")
.SetDefault(2);
// for parameter prefetch
AddAttr<bool>("remote_prefetch", "").SetDefault(false);
AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0);
AddAttr<std::vector<int64_t>>("height_sections",
"Height for each output SelectedRows.")
......
......@@ -475,8 +475,8 @@
func : heaviside_grad
- backward_op : hsigmoid_loss_grad
forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, bool is_sparse)
forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool is_sparse)
output : Tensor(x_grad), Tensor(w_grad), Tensor(bias_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
......
......@@ -684,7 +684,7 @@
backward : heaviside_grad
- op : hsigmoid_loss
args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse)
args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse)
output : Tensor(out), Tensor(pre_out), Tensor(w_out)
infer_meta :
func : HSigmoidLossInferMeta
......
......@@ -1432,7 +1432,6 @@ void HSigmoidLossInferMeta(const MetaTensor& x,
const MetaTensor& path,
const MetaTensor& code,
int num_classes,
bool remote_prefetch,
bool is_sparse,
MetaTensor* out,
MetaTensor* pre_out,
......
......@@ -312,7 +312,6 @@ void HSigmoidLossInferMeta(const MetaTensor& x,
const MetaTensor& path,
const MetaTensor& code,
int num_classes,
bool remote_prefetch,
bool is_sparse,
MetaTensor* out,
MetaTensor* pre_out,
......
......@@ -35,7 +35,6 @@ void HSigmoidLossGradKernelImpl(const Context& ctx,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
......
......@@ -31,7 +31,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
......@@ -46,7 +45,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
pre_out,
out_grad,
num_classes,
remote_prefetch,
is_sparse,
x_grad,
w_grad,
......
......@@ -34,7 +34,6 @@ void HSigmoidLossKernel(const Context& ctx,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* out,
DenseTensor* pre_out,
......
......@@ -29,7 +29,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* x_grad,
DenseTensor* w_grad,
......
......@@ -27,7 +27,6 @@ void HSigmoidLossKernel(const Context& ctx,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* out,
DenseTensor* pre_out,
......
......@@ -48,7 +48,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* x_grad,
SelectedRows* w_grad,
......@@ -74,7 +73,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
pre_out,
out_grad,
num_classes,
remote_prefetch,
is_sparse,
x_grad,
w_grad_value,
......
......@@ -31,7 +31,6 @@ void HSigmoidLossGradKernel(const Context& ctx,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
bool is_sparse,
DenseTensor* x_grad,
SelectedRows* w_grad,
......
......@@ -20,7 +20,7 @@ KernelSignature HierarchicalSigmoidOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("hsigmoid_loss",
{"X", "Label", "W", "Bias", "PathTable", "PathCode"},
{"num_classes", "remote_prefetch", "is_sparse"},
{"num_classes", "is_sparse"},
{"Out", "PreOut", "W_Out"});
}
......@@ -36,7 +36,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
"Bias",
"PreOut",
"Out@GRAD"},
{"num_classes", "remote_prefetch", "is_sparse"},
{"num_classes", "is_sparse"},
{"X@GRAD", "W@GRAD", "Bias@GRAD"});
} else if (ctx.IsSelectedRowsOutput("W@GRAD")) {
return KernelSignature("hsigmoid_loss_grad_sr",
......@@ -48,7 +48,7 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
"Bias",
"PreOut",
"Out@GRAD"},
{"num_classes", "remote_prefetch", "is_sparse"},
{"num_classes", "is_sparse"},
{"X@GRAD", "W@GRAD", "Bias@GRAD"});
} else {
return KernelSignature("unregistered", {}, {}, {});
......
......@@ -177,7 +177,6 @@ def python_api(
path_code=None,
num_classes=-1,
is_sparse=False,
remote_prefetch=False,
):
return paddle.nn.functional.hsigmoid_loss(
input,
......
......@@ -1016,7 +1016,6 @@ def hsigmoid_loss(
attrs = {
"num_classes": num_classes,
"is_sparse": is_sparse,
"remote_prefetch": is_sparse,
}
inputs = {
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.