Commit 2f6b529a authored by JiabinYang

refine code and comments, test=develop

Parent 02d68051
@@ -193,7 +193,7 @@ class HierarchicalSigmoidGradOpGradVarTypeInference
       block->Var(out_W_var_name)
           ->SetType(framework::proto::VarType::LOD_TENSOR);
       VLOG(3) << "hierarchical_sigmoid_grad op "
-              << framework::GradVarName("Bias") << " is set to SelectedRows";
+              << framework::GradVarName("Bias") << " is set to LoDTensor";
       block->Var(out_Bias_var_name)
           ->SetType(framework::proto::VarType::LOD_TENSOR);
     }
...
@@ -120,8 +120,6 @@ void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
   size_t input_width = input.dims()[1];
   size_t tmat_width = tmat.dims()[1];
   size_t weight_width = weight->dims()[1];
-  VLOG(30) << "sparse w_grad dims is [" << weight->dims()[0] << " ,"
-           << weight->dims()[1] << " ]";
   auto tmat_value = tmat.data<T>();
   auto weight_value = weight->data<T>();
   auto input_value = input.data<T>();
@@ -147,8 +145,6 @@ void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
   size_t input_width = input.dims()[1];
   size_t tmat_width = tmat.dims()[1];
   size_t weight_width = weight->value().dims()[1];
-  VLOG(30) << "sparse w_grad dims is: [" << weight->value().dims()[0] << " ,"
-           << weight->value().dims()[1] << " ]";
   auto tmat_value = tmat.data<T>();
   auto weight_value = weight->mutable_value()->data<T>();
   auto input_value = input.data<T>();
@@ -157,11 +153,9 @@ void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
     int code_length = code->get_length();
     for (int j = 0; j < code_length; ++j) {
       size_t index = code->calc_index(j);
       for (size_t k = 0; k < input_width; ++k) {
         int64_t row_index =
             weight->AutoGrownIndex(static_cast<int64_t>(index), false, true);
         weight_value[row_index * weight_width + k] +=
             tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
       }
...
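Note on the hunks above: the SelectedRows overload of MulGradWeight accumulates, for each sample i and each node j on that sample's path code, the contribution tmat[i][j] * input[i] into the weight-gradient row that AutoGrownIndex assigns to the node id; the removed VLOG(30) lines were debug prints of the gradient dimensions. A minimal NumPy sketch of that accumulation follows (function and variable names, and the explicit id-to-row map standing in for AutoGrownIndex, are illustrative only, not Paddle's API):

import numpy as np

def mul_grad_weight_sparse(tmat, input_, codes, id_to_row, weight_rows):
    """Accumulate sparse weight gradients, mirroring the C++ loop above.

    tmat:        (num_samples, code_length) pre-output gradients
    input_:      (num_samples, input_width) layer input
    codes:       codes[i] lists the node ids on sample i's path
    id_to_row:   maps a node id to a row of the SelectedRows value tensor
    weight_rows: (num_rows, input_width) SelectedRows value tensor
    """
    for i, path in enumerate(codes):
        for j, node_id in enumerate(path):
            row = id_to_row[node_id]
            # Same update as the C++ inner loop over k, vectorized:
            # weight_value[row_index * weight_width + k] +=
            #     tmat_value[i * tmat_width + j] * input_value[input_width * i + k]
            weight_rows[row] += tmat[i, j] * input_[i]
    return weight_rows

# Example usage with toy shapes:
tmat = np.random.randn(2, 3)          # 2 samples, path length 3
x = np.random.randn(2, 4)             # input_width = 4
codes = [[0, 2, 5], [1, 2, 4]]        # node ids along each sample's path
id_to_row = {i: i for i in range(6)}  # identity map, for the sketch only
w_grad = mul_grad_weight_sparse(tmat, x, codes, id_to_row, np.zeros((6, 4)))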
@@ -4581,7 +4581,8 @@ def hsigmoid(input,
             is not set, the bias is initialized zero. Default: None.
         name (str|None): A name for this layer(optional). If set None, the layer
             will be named automatically. Default: None.
-        is_costum: (bool|False)using user defined binary tree instead of default complete binary tree
+        is_costum: (bool|False)using user defined binary tree instead of default complete binary tree, if costum is
+            set you need to set ptable/pcode/non_leaf_num, otherwise num_classes should be set
         is_sparse: (bool|False)using sparse update instead of dense update
     Returns:
...
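For context on the docstring hunk: a sketch of calling fluid.layers.hsigmoid in both modes. The default-tree call matches the long-standing API; the custom-tree arguments (ptable, pcode, non_leaf_num, is_costum, is_sparse, spelled as in this commit) are taken from the docstring above, so the exact signature of that call is an assumption, not verified API:

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[10], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='int64')

# Default mode: a complete binary tree over num_classes leaf nodes.
cost = fluid.layers.hsigmoid(input=x, label=y, num_classes=6)

# Custom-tree mode (argument names from this commit's docstring; the
# exact signature here is an assumption):
# ptable = fluid.layers.data(name='ptable', shape=[3], dtype='int64')
# pcode = fluid.layers.data(name='pcode', shape=[3], dtype='int64')
# cost = fluid.layers.hsigmoid(input=x, label=y, ptable=ptable, pcode=pcode,
#                              non_leaf_num=5, is_costum=True, is_sparse=True)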