Commit 97c432ee, authored by Travis CI

Deploy to GitHub Pages: b64aac54

Parent f3278e8d
@@ -286,28 +286,50 @@ class TestMulOp(unittest.TestCase):
The backward Op unit test inherits from `GradientChecker`, which in turn inherits from `unittest.TestCase`, so the backward test methods must have names starting with `test_`.
```
class TestMulGradOp(GradientChecker):
    def setUp(self):
        self.op = create_op("mul")
        self.inputs = {
            'X': np.random.random((32, 84)).astype("float32"),
            'Y': np.random.random((84, 100)).astype("float32")
        }

    def test_cpu_gpu_compare(self):
        self.compare_grad(self.op, self.inputs)

    def test_normal(self):
        # mul op will enlarge the relative error
        self.check_grad(
            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)

    def test_ignore_x(self):
        self.check_grad(
            self.op,
            self.inputs, ["Y"],
            "Out",
            max_relative_error=0.5,
            no_grad_set={"X"})

    def test_ignore_y(self):
        self.check_grad(
            self.op,
            self.inputs, ["X"],
            "Out",
            max_relative_error=0.5,
            no_grad_set={"Y"})
```
The key points are explained below:
- Calling `create_op("mul")` creates the forward Op that corresponds to the backward Op.
- Define the input `inputs`.
- `test_cpu_gpu_compare` calls `compare_grad` to compare the results computed on CPU and GPU.
- `test_normal` calls `check_grad` to check gradient stability; the gradients are verified against a numerically computed reference (see the sketch after this list).
  - The first argument `self.op`: the forward Op.
  - The second argument `self.inputs`: the input dictionary, whose keys must match the names defined in the `ProtoMaker`.
  - The third argument `["X", "Y"]`: specifies that the gradients of the input variables `X` and `Y` are checked.
  - The fourth argument `"Out"`: specifies `Out`, the final output target variable of the forward network.
- `test_ignore_x` and `test_ignore_y` test the case where the gradient of only one input needs to be computed.
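
To build intuition for the numerical check that `check_grad` performs, below is a minimal stand-alone sketch of a central-difference gradient check for the `mul` computation. It only illustrates the idea, not PaddlePaddle's actual implementation; the helpers `mul_forward` and `numeric_grad` are hypothetical names introduced here for the example.

```
import numpy as np


def mul_forward(x, y):
    # Forward computation of the mul op: Out = X . Y
    return np.dot(x, y)


def numeric_grad(f, x, y, out_grad, eps=1e-4):
    # Central-difference approximation of d(sum(Out * out_grad)) / dX.
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        orig = x[idx]
        x[idx] = orig + eps
        pos = np.sum(f(x, y) * out_grad)
        x[idx] = orig - eps
        neg = np.sum(f(x, y) * out_grad)
        x[idx] = orig
        grad[idx] = (pos - neg) / (2 * eps)
    return grad


x = np.random.random((3, 4)).astype("float64")
y = np.random.random((4, 5)).astype("float64")
out_grad = np.ones((3, 5), dtype="float64")

# Analytic gradient of mul with respect to X: dX = dOut . Y^T
analytic = np.dot(out_grad, y.T)
numeric = numeric_grad(mul_forward, x, y, out_grad)

max_relative_error = np.max(
    np.abs(analytic - numeric) / np.maximum(np.abs(analytic), 1e-8))
assert max_relative_error < 1e-3
```

`check_grad` compares the Op's analytically computed gradients against numeric estimates of this kind; because the mul Op enlarges the relative error (as noted in the code comment), the test above uses the loose threshold `max_relative_error=0.5`.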
### Build and Run