Commit bebe9e0e authored by wenkai

update histogram summary doc, add tips about histogram summary performance.

Parent 29e26f19
@@ -80,7 +80,6 @@ class MyOptimizer(Optimizer):
         # Initialize ScalarSummary
         self.sm_scalar = P.ScalarSummary()
         self.histogram_summary = P.HistogramSummary()
-        self.param_count = len(self.parameters)
         self.weight_names = [param.name for param in self.parameters]

     def construct(self, grads):
@@ -89,8 +88,9 @@ class MyOptimizer(Optimizer):
         self.sm_scalar("learning_rate", learning_rate)

         # Record weight
-        for i in range(self.param_count):
-            self.histogram_summary(self.weight_names[i], self.parameters[i])
+        self.histogram_summary(self.weight_names[0], self.parameters[0])
+        # Record gradient
+        self.histogram_summary(self.weight_names[0] + ".gradient", grads[0])
 ......
@@ -169,6 +169,8 @@ Use the `save_graphs` option of `context` to record the computational graph afte
 > Currently MindSpore supports recording computational graph after operator fusion for Ascend 910 AI processor only.
+> It is recommended to keep the number of `HistogramSummary` calls under 10 per batch; the more often `HistogramSummary` is called, the greater the performance overhead.

 ## MindInsight Commands

 ### View the command help information.
......
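For reference, here is a minimal sketch of how a custom optimizer could follow the new tip by recording histograms for only a capped number of parameters per batch. This sketch is not part of the changed tutorial: the imports are assumptions about the usual tutorial setup, and `record_count` is an illustrative name; only `P.HistogramSummary`, `self.parameters`, and `self.weight_names` come from the snippet above.

```python
from mindspore.nn import Optimizer
from mindspore.ops import operations as P


class MyOptimizer(Optimizer):
    """Sketch only: the actual parameter-update logic is elided, as in the tutorial snippet."""

    def __init__(self, learning_rate, params):
        super(MyOptimizer, self).__init__(learning_rate, params)
        self.histogram_summary = P.HistogramSummary()
        self.weight_names = [param.name for param in self.parameters]
        # Cap how many HistogramSummary calls are issued per batch so each
        # step stays at or below the recommended 10 calls.
        self.record_count = min(10, len(self.parameters))

    def construct(self, grads):
        # ... learning-rate recording and parameter-update logic elided ...
        for i in range(self.record_count):
            self.histogram_summary(self.weight_names[i], self.parameters[i])
        # ... return the update result as a normal optimizer would ...
```

Because `record_count` is a plain Python int fixed in `__init__`, the loop unrolls at graph compile time in the same way as the `param_count` loop this commit removes.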
@@ -85,7 +85,6 @@ class MyOptimizer(Optimizer):
         # Initialize ScalarSummary
         self.sm_scalar = P.ScalarSummary()
         self.histogram_summary = P.HistogramSummary()
-        self.param_count = len(self.parameters)
         self.weight_names = [param.name for param in self.parameters]

     def construct(self, grads):
@@ -94,8 +93,9 @@ class MyOptimizer(Optimizer):
         self.sm_scalar("learning_rate", learning_rate)

         # Record weight
-        for i in range(self.param_count):
-            self.histogram_summary(self.weight_names[i], self.parameters[i])
+        self.histogram_summary(self.weight_names[0], self.parameters[0])
+        # Record gradient
+        self.histogram_summary(self.weight_names[0] + ".gradient", grads[0])
 ......
@@ -175,6 +175,8 @@ def test_summary():
 > Currently MindSpore only supports exporting the computational graph after operator fusion on the Ascend 910 AI processor.
+> Within one batch, please keep the number of calls to the `HistogramSummary` operator under 10; the more calls, the greater the performance overhead.

 ## MindInsight Commands

 ### View the command help information
......
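The same budget applies to the pattern introduced by this change, where both a weight and its gradient are recorded: each parameter then costs two `HistogramSummary` calls, so about five parameters per batch stay within the recommended limit. A hedged sketch of that variant, reusing the `MyOptimizer` names from the diff above and assuming `self.record_count` is set to at most 5 in `__init__` (the cap of 5 is an illustrative choice, not from the tutorial):

```python
    def construct(self, grads):
        # ... learning-rate recording and parameter-update logic elided ...
        # self.record_count is assumed to be capped at 5 in __init__, since
        # each parameter now costs two HistogramSummary calls per batch.
        for i in range(self.record_count):
            self.histogram_summary(self.weight_names[i], self.parameters[i])
            self.histogram_summary(self.weight_names[i] + ".gradient", grads[i])
```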