Commit af66fcb2 authored by Travis CI

Deploy to GitHub Pages: dd0008d5

Parent 09e2dd36
@@ -65,20 +65,6 @@ class Optimizer(object):
     def __init__(self):
         pass
 
-    def create_backward_pass(self, loss, parameter_list=None):
-        """
-        create and add gradient Operators in BlockDesc to compute gradients of `loss`
-        for parameters in parameter_list
-
-        Args:
-            loss: a variable generated by the cost function.
-            parameter_list: parameters for which gradients are computed and updated to optimize the loss.
-
-        Returns:
-            a list of (parameter, gradient) pairs.
-        """
-        return None
-
     def create_optimization_pass(self, parameters_and_grads):
         """Add optimization operators to update gradients to variables.
@@ -93,7 +79,7 @@ class Optimizer(object):
     def minimize(self, loss, parameter_list):
         """Add operations to minimize `loss` by updating `parameter_list`.
 
-        This method combines interface `create_backward_pass()` and
+        This method combines interface `append_backward_ops()` and
         `create_optimization_pass()` into one.
         """
         params_grads = self.create_backward_pass(loss, parameter_list)
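For context, the rename above keeps the design doc's two-pass structure intact: `append_backward_ops()` (formerly `create_backward_pass()`) produces (parameter, gradient) pairs, and `create_optimization_pass()` turns those pairs into update operators; `minimize()` simply chains the two. Below is a minimal, framework-free sketch of that composition. All class and variable names are illustrative stand-ins, not the actual PaddlePaddle API, and the gradients are faked as constants so the example runs on its own.

    # Illustrative sketch only: the real methods append operators to a
    # program description rather than computing numbers eagerly.
    class SketchOptimizer(object):
        def __init__(self, learning_rate=0.1):
            self.learning_rate = learning_rate

        def append_backward_ops(self, loss, parameter_list=None):
            # Stand-in for the backward pass: pair every parameter with a
            # (fake) constant gradient and return the (param, grad) pairs.
            return [(p, 1.0) for p in (parameter_list or [])]

        def create_optimization_pass(self, parameters_and_grads):
            # Stand-in for the update pass: apply a plain SGD step per pair.
            return [p - self.learning_rate * g for p, g in parameters_and_grads]

        def minimize(self, loss, parameter_list=None):
            # The composition the docstring describes: backward pass first,
            # then the optimization pass over the resulting pairs.
            params_grads = self.append_backward_ops(loss, parameter_list)
            return self.create_optimization_pass(params_grads)

    print(SketchOptimizer().minimize(loss=None, parameter_list=[0.5, 2.0]))
    # -> [0.4, 1.9]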
Source diff not shown because it is too large. You can view the blob instead.