<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  
  <link rel="shortcut icon" href="../../img/favicon.ico">
  <title>Pruning and Sensitivity - PaddleSlim Docs</title>
  <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>

  <link rel="stylesheet" href="../../css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../css/theme_extra.css" type="text/css" />
  <link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css">
  
  <script>
    // Current page data
    var mkdocs_page_name = "\u526a\u679d\u4e0e\u654f\u611f\u5ea6";
    var mkdocs_page_input_path = "api/prune_api.md";
    var mkdocs_page_url = null;
  </script>
  
  <script src="../../js/jquery-2.1.1.min.js" defer></script>
  <script src="../../js/modernizr-2.8.3.min.js" defer></script>
  <script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
  <script>hljs.initHighlightingOnLoad();</script> 
  
</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
      <div class="wy-side-nav-search">
        <a href="../.." class="icon icon-home"> PaddleSlim Docs</a>
        <div role="search">
  <form id ="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" title="Type search term here" />
  </form>
</div>
      </div>

      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
	<ul class="current">
	  
          
            <li class="toctree-l1">
		
    <a class="" href="../..">Home</a>
	    </li>
          
            <li class="toctree-l1">
		
    <a class="" href="../../model_zoo/">Model Zoo</a>
	    </li>
          
            <li class="toctree-l1">
		
    <span class="caption-text">Tutorials</span>
    <ul class="subnav">
                <li class="">
                    
    <a class="" href="../../tutorials/quant_post_demo/">离线量化</a>
                </li>
                <li class="">
                    
    <a class="" href="../../tutorials/quant_aware_demo/">量化训练</a>
                </li>
                <li class="">
                    
    <a class="" href="../../tutorials/quant_embedding_demo/">Embedding量化</a>
                </li>
                <li class="">
                    
    <a class="" href="../../tutorials/nas_demo/">SA搜索</a>
                </li>
                <li class="">
                    
    <a class="" href="../../tutorials/distillation_demo/">知识蒸馏</a>
                </li>
    </ul>
	    </li>
          
            <li class="toctree-l1">
		
    <span class="caption-text">API</span>
    <ul class="subnav">
                <li class="">
                    
    <a class="" href="../quantization_api/">量化</a>
                </li>
                <li class=" current">
                    
    <a class="current" href="./">Pruning and Sensitivity</a>
    <ul class="subnav">
            
    <li class="toctree-l3"><a href="#pruner">Pruner</a></li>
    

    <li class="toctree-l3"><a href="#sensitivity">sensitivity</a></li>
    

    <li class="toctree-l3"><a href="#merge_sensitive">merge_sensitive</a></li>
    

    <li class="toctree-l3"><a href="#load_sensitivities">load_sensitivities</a></li>
    

    <li class="toctree-l3"><a href="#get_ratios_by_loss">get_ratios_by_loss</a></li>
    

    </ul>
                </li>
                <li class="">
                    
    <a class="" href="../analysis_api/">Model Analysis</a>
                </li>
                <li class="">
                    
    <a class="" href="../single_distiller_api/">Knowledge Distillation</a>
                </li>
                <li class="">
                    
    <a class="" href="../nas_api/">SA搜索</a>
                </li>
                <li class="">
                    
    <a class="" href="../search_space/">搜索空间</a>
                </li>
                <li class="">
                    
    <a class="" href="../../table_latency/">硬件延时评估表</a>
                </li>
    </ul>
	    </li>
          
            <li class="toctree-l1">
		
    <a class="" href="../../algo/algo/">算法原理</a>
	    </li>
          
        </ul>
      </div>
      &nbsp;
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="../..">PaddleSlim Docs</a>
      </nav>

      
      <div class="wy-nav-content">
        <div class="rst-content">
          <div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="../..">Docs</a> &raquo;</li>
    
      
        
          <li>API &raquo;</li>
        
      
    
    <li>Pruning and Sensitivity</li>
    <li class="wy-breadcrumbs-aside">
      
        <a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/docs/api/prune_api.md"
          class="icon icon-github"> Edit on GitHub</a>
      
    </li>
  </ul>
  <hr/>
</div>
          <div role="main">
            <div class="section">
              
                <h2 id="pruner">Pruner<a class="headerlink" href="#pruner" title="Permanent link">#</a></h2>
<dl>
<dt>paddleslim.prune.Pruner(criterion="l1_norm")<a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/pruner.py#L28">source code</a></dt>
<dd>
<p>Prunes the channels of a convolutional network once. Pruning the channels of a convolution layer means pruning that layer's output channels. A convolution layer's weight has the shape <code>[output_channel, input_channel, kernel_size, kernel_size]</code>; the number of output channels is reduced by pruning the first dimension of this weight.</p>
</dd>
</dl>
<p><strong>Parameters:</strong></p>
<ul>
<li><strong>criterion</strong> - The metric used to evaluate the importance of the channels in a convolution layer. Currently only <code>l1_norm</code> is supported. Default: <code>l1_norm</code>.</li>
</ul>
<p><strong>Returns:</strong> an instance of the Pruner class</p>
<p><strong>Example:</strong></p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="kn">from</span> <span class="nn">paddleslim.prune</span> <span class="kn">import</span> <span class="n">Pruner</span>
<span class="n">pruner</span> <span class="o">=</span> <span class="n">Pruner</span><span class="p">()</span>
</pre></div>
</td></tr></table>

<dl>
<dt>paddleslim.prune.Pruner.prune(program, scope, params, ratios, place=None, lazy=False, only_graph=False, param_backup=False, param_shape_backup=False)<a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/pruner.py#L36">source code</a></dt>
<dd>
<p>Prunes the weights of a group of convolution layers in the target network.</p>
</dd>
</dl>
<p><strong>Parameters:</strong></p>
<ul>
<li>
<p><strong>program(paddle.fluid.Program)</strong> - The target network to prune. For more about Program, see: <a href="https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Program_cn.html#program">Introduction to Program</a></p>
</li>
<li>
<p><strong>scope(paddle.fluid.Scope)</strong> - The <code>scope</code> that holds the weights to prune. In Paddle, a <code>scope</code> instance stores model parameters and the values of runtime variables. Parameter values in the Scope are pruned in place. For more details, see <a href="">Introduction to Scope</a></p>
</li>
<li>
<p><strong>params(list&lt;str&gt;)</strong> - Names of the convolution-layer parameters to prune. All parameter names in a model can be listed as follows:
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span>1
2
3</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="k">for</span> <span class="nv">block</span> <span class="nv">in</span> <span class="nv">program</span>.<span class="nv">blocks</span>:
    <span class="k">for</span> <span class="nv">param</span> <span class="nv">in</span> <span class="nv">block</span>.<span class="nv">all_parameters</span><span class="ss">()</span>:
        <span class="nv">print</span><span class="ss">(</span><span class="s2">&quot;</span><span class="s">param: {}; shape: {}</span><span class="s2">&quot;</span>.<span class="nv">format</span><span class="ss">(</span><span class="nv">param</span>.<span class="nv">name</span>, <span class="nv">param</span>.<span class="nv">shape</span><span class="ss">))</span>
</pre></div>
</td></tr></table></p>
</li>
<li>
<p><strong>ratios(list&lt;float&gt;)</strong> - The pruning ratios used for <code>params</code>, given as a list. The list must have the same length as <code>params</code>.</p>
</li>
<li>
<p><strong>place(paddle.fluid.Place)</strong> - The device on which the parameters to prune reside; either <code>CUDAPlace</code> or <code>CPUPlace</code>. <a href="">Introduction to Place</a></p>
</li>
<li>
<p><strong>lazy(bool)</strong> - If <code>lazy</code> is True, pruning is performed by zeroing out the parameters of the selected channels, so the parameter <code>shape</code> stays unchanged. If <code>lazy</code> is False, the parameters of the pruned channels are removed and the parameter <code>shape</code> changes.</p>
</li>
<li>
<p><strong>only_graph(bool)</strong> - Whether to prune only the network structure. In Paddle, a Program defines the network structure and a Scope stores the parameter values; one Scope instance can be shared by several Programs, e.g. the Program defining the training network and the Program defining the test network may share the same Scope. If <code>only_graph</code> is True, only the channels of the convolutions defined in the Program are pruned; if <code>only_graph</code> is False, the values of the convolution parameters in the Scope are pruned as well. Default: False.</p>
</li>
<li>
<p><strong>param_backup(bool)</strong> - Whether to return a backup of the parameter values. Default: False.</p>
</li>
<li>
<p><strong>param_shape_backup(bool)</strong> - Whether to return a backup of the parameter <code>shape</code>s. Default: False.</p>
</li>
</ul>
<p><strong>Returns:</strong></p>
<ul>
<li>
<p><strong>pruned_program(paddle.fluid.Program)</strong> - The pruned Program.</p>
</li>
<li>
<p><strong>param_backup(dict)</strong> - A backup of the parameter values, used to restore the parameter values in the Scope.</p>
</li>
<li>
<p><strong>param_shape_backup(dict)</strong> - A backup of the parameter shapes.</p>
</li>
</ul>
<p><strong>Example:</strong></p>
<p>Click <a href="https://aistudio.baidu.com/aistudio/projectDetail/200786">AIStudio</a> to run the following example code.
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span> 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="kn">import</span> <span class="nn">paddle.fluid</span> <span class="kn">as</span> <span class="nn">fluid</span>
<span class="kn">from</span> <span class="nn">paddle.fluid.param_attr</span> <span class="kn">import</span> <span class="n">ParamAttr</span>
<span class="kn">from</span> <span class="nn">paddleslim.prune</span> <span class="kn">import</span> <span class="n">Pruner</span>

<span class="k">def</span> <span class="nf">conv_bn_layer</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span>
                  <span class="n">num_filters</span><span class="p">,</span>
                  <span class="n">filter_size</span><span class="p">,</span>
                  <span class="n">name</span><span class="p">,</span>
                  <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                  <span class="n">groups</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                  <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span>
    <span class="n">conv</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">conv2d</span><span class="p">(</span>
        <span class="nb">input</span><span class="o">=</span><span class="nb">input</span><span class="p">,</span>
        <span class="n">num_filters</span><span class="o">=</span><span class="n">num_filters</span><span class="p">,</span>
        <span class="n">filter_size</span><span class="o">=</span><span class="n">filter_size</span><span class="p">,</span>
        <span class="n">stride</span><span class="o">=</span><span class="n">stride</span><span class="p">,</span>
        <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="n">filter_size</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">//</span> <span class="mi">2</span><span class="p">,</span>
        <span class="n">groups</span><span class="o">=</span><span class="n">groups</span><span class="p">,</span>
        <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">,</span>
        <span class="n">param_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_weights&quot;</span><span class="p">),</span>
        <span class="n">bias_attr</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
        <span class="n">name</span><span class="o">=</span><span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_out&quot;</span><span class="p">)</span>
    <span class="n">bn_name</span> <span class="o">=</span> <span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_bn&quot;</span>
    <span class="k">return</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">batch_norm</span><span class="p">(</span>
        <span class="nb">input</span><span class="o">=</span><span class="n">conv</span><span class="p">,</span>
        <span class="n">act</span><span class="o">=</span><span class="n">act</span><span class="p">,</span>
        <span class="n">name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_output&#39;</span><span class="p">,</span>
        <span class="n">param_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_scale&#39;</span><span class="p">),</span>
        <span class="n">bias_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_offset&#39;</span><span class="p">),</span>
        <span class="n">moving_mean_name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_mean&#39;</span><span class="p">,</span>
        <span class="n">moving_variance_name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_variance&#39;</span><span class="p">,</span> <span class="p">)</span>

<span class="n">main_program</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Program</span><span class="p">()</span>
<span class="n">startup_program</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Program</span><span class="p">()</span>
<span class="c1">#   X       X              O       X              O</span>
<span class="c1"># conv1--&gt;conv2--&gt;sum1--&gt;conv3--&gt;conv4--&gt;sum2--&gt;conv5--&gt;conv6</span>
<span class="c1">#     |            ^ |                    ^</span>
<span class="c1">#     |____________| |____________________|</span>
<span class="c1">#</span>
<span class="c1"># X: prune output channels</span>
<span class="c1"># O: prune input channels</span>
<span class="k">with</span> <span class="n">fluid</span><span class="o">.</span><span class="n">program_guard</span><span class="p">(</span><span class="n">main_program</span><span class="p">,</span> <span class="n">startup_program</span><span class="p">):</span>
    <span class="nb">input</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s2">&quot;image&quot;</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="bp">None</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">16</span><span class="p">])</span>
    <span class="n">conv1</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv1&quot;</span><span class="p">)</span>
    <span class="n">conv2</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv1</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv2&quot;</span><span class="p">)</span>
    <span class="n">sum1</span> <span class="o">=</span> <span class="n">conv1</span> <span class="o">+</span> <span class="n">conv2</span>
    <span class="n">conv3</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">sum1</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv3&quot;</span><span class="p">)</span>
    <span class="n">conv4</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv3</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv4&quot;</span><span class="p">)</span>
    <span class="n">sum2</span> <span class="o">=</span> <span class="n">conv4</span> <span class="o">+</span> <span class="n">sum1</span>
    <span class="n">conv5</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">sum2</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv5&quot;</span><span class="p">)</span>
    <span class="n">conv6</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv5</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv6&quot;</span><span class="p">)</span>

<span class="n">place</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">CPUPlace</span><span class="p">()</span>
<span class="n">exe</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Executor</span><span class="p">(</span><span class="n">place</span><span class="p">)</span>
<span class="n">scope</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Scope</span><span class="p">()</span>
<span class="n">exe</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">startup_program</span><span class="p">,</span> <span class="n">scope</span><span class="o">=</span><span class="n">scope</span><span class="p">)</span>
<span class="n">pruner</span> <span class="o">=</span> <span class="n">Pruner</span><span class="p">()</span>
<span class="n">main_program</span><span class="p">,</span> <span class="n">_</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">pruner</span><span class="o">.</span><span class="n">prune</span><span class="p">(</span>
    <span class="n">main_program</span><span class="p">,</span>
    <span class="n">scope</span><span class="p">,</span>
    <span class="n">params</span><span class="o">=</span><span class="p">[</span><span class="s2">&quot;conv4_weights&quot;</span><span class="p">],</span>
    <span class="n">ratios</span><span class="o">=</span><span class="p">[</span><span class="mf">0.5</span><span class="p">],</span>
    <span class="n">place</span><span class="o">=</span><span class="n">place</span><span class="p">,</span>
    <span class="n">lazy</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
    <span class="n">only_graph</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
    <span class="n">param_backup</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
    <span class="n">param_shape_backup</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span>

<span class="k">for</span> <span class="n">param</span> <span class="ow">in</span> <span class="n">main_program</span><span class="o">.</span><span class="n">global_block</span><span class="p">()</span><span class="o">.</span><span class="n">all_parameters</span><span class="p">():</span>
    <span class="k">if</span> <span class="s2">&quot;weights&quot;</span> <span class="ow">in</span> <span class="n">param</span><span class="o">.</span><span class="n">name</span><span class="p">:</span>
        <span class="k">print</span><span class="p">(</span><span class="s2">&quot;param name: {}; param shape: {}&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">param</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">param</span><span class="o">.</span><span class="n">shape</span><span class="p">))</span>
</pre></div>
</td></tr></table></p>
<hr />
<h2 id="sensitivity">sensitivity<a class="headerlink" href="#sensitivity" title="Permanent link">#</a></h2>
<dl>
<dt>paddleslim.prune.sensitivity(program, place, param_names, eval_func, sensitivities_file=None, pruned_ratios=None) <a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L34">source code</a></dt>
<dd>
<p>Computes the sensitivity of each convolution layer in the network. The sensitivity of a convolution layer is measured as follows: prune different ratios of the layer's output channels in turn, and compute the accuracy loss on the test set after each pruning. Given this sensitivity information, the pruning ratio of each convolution layer can be chosen by inspection or other means.</p>
</dd>
</dl>
<p><strong>Parameters:</strong></p>
<ul>
<li>
<p><strong>program(paddle.fluid.Program)</strong> - The target network to evaluate. For more about Program, see: <a href="https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Program_cn.html#program">Introduction to Program</a></p>
</li>
<li>
<p><strong>place(paddle.fluid.Place)</strong> - The device on which the parameters to analyze reside; either <code>CUDAPlace</code> or <code>CPUPlace</code>. <a href="">Introduction to Place</a></p>
</li>
<li>
<p><strong>param_names(list&lt;str&gt;)</strong> - Names of the convolution-layer parameters to analyze. All parameter names in a model can be listed as follows:</p>
</li>
</ul>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span>1
2
3</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="k">for</span> <span class="nv">block</span> <span class="nv">in</span> <span class="nv">program</span>.<span class="nv">blocks</span>:
    <span class="k">for</span> <span class="nv">param</span> <span class="nv">in</span> <span class="nv">block</span>.<span class="nv">all_parameters</span><span class="ss">()</span>:
        <span class="nv">print</span><span class="ss">(</span><span class="s2">&quot;</span><span class="s">param: {}; shape: {}</span><span class="s2">&quot;</span>.<span class="nv">format</span><span class="ss">(</span><span class="nv">param</span>.<span class="nv">name</span>, <span class="nv">param</span>.<span class="nv">shape</span><span class="ss">))</span>
</pre></div>
</td></tr></table>

<ul>
<li>
<p><strong>eval_func(function)</strong> - A callback used to evaluate the pruned model. It takes the pruned <code>program</code> as its argument and returns a score representing the accuracy of that program, which is used to compute the accuracy loss caused by the current pruning.</p>
</li>
<li>
<p><strong>sensitivities_file(str)</strong> - A file on the local file system in which the sensitivity information is stored. While sensitivities are being computed, newly computed entries are continuously appended to this file. When the task is restarted, sensitivities already present in the file are not recomputed. The file can be loaded with <code>pickle</code>.</p>
</li>
<li>
<p><strong>pruned_ratios(list&lt;float&gt;)</strong> - The channel pruning ratios applied in turn when computing the sensitivity of a convolution layer. Default: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9].</p>
</li>
</ul>
<p><strong>Returns:</strong></p>
<ul>
<li><strong>sensitivities(dict)</strong> - A dict holding the sensitivity information, in the following format:</li>
</ul>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span>1
2
3
4
5
6
7
8
9</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="err">{</span><span class="ss">&quot;weight_0&quot;</span><span class="p">:</span>
   <span class="err">{</span><span class="mi">0</span><span class="p">.</span><span class="mi">1</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">22</span><span class="p">,</span>
    <span class="mi">0</span><span class="p">.</span><span class="mi">2</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">33</span>
   <span class="err">}</span><span class="p">,</span>
 <span class="ss">&quot;weight_1&quot;</span><span class="p">:</span>
   <span class="err">{</span><span class="mi">0</span><span class="p">.</span><span class="mi">1</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">21</span><span class="p">,</span>
    <span class="mi">0</span><span class="p">.</span><span class="mi">2</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">4</span>
   <span class="err">}</span>
<span class="err">}</span>
</pre></div>
</td></tr></table>

<p>Here, <code>weight_0</code> is the name of a convolution-layer parameter; in sensitivities['weight_0'], each <code>key</code> is a pruning ratio and the corresponding <code>value</code> is the relative accuracy loss.</p>
<p><strong>Example:</strong></p>
<p>Click <a href="https://aistudio.baidu.com/aistudio/projectdetail/201401">AIStudio</a> to run the following example code.</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span> 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="kn">import</span> <span class="nn">paddle</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">paddle.fluid</span> <span class="kn">as</span> <span class="nn">fluid</span>
<span class="kn">from</span> <span class="nn">paddle.fluid.param_attr</span> <span class="kn">import</span> <span class="n">ParamAttr</span>
<span class="kn">from</span> <span class="nn">paddleslim.prune</span> <span class="kn">import</span> <span class="n">sensitivity</span>
<span class="kn">import</span> <span class="nn">paddle.dataset.mnist</span> <span class="kn">as</span> <span class="nn">reader</span>

<span class="k">def</span> <span class="nf">conv_bn_layer</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span>
                  <span class="n">num_filters</span><span class="p">,</span>
                  <span class="n">filter_size</span><span class="p">,</span>
                  <span class="n">name</span><span class="p">,</span>
                  <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                  <span class="n">groups</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                  <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span>
    <span class="n">conv</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">conv2d</span><span class="p">(</span>
        <span class="nb">input</span><span class="o">=</span><span class="nb">input</span><span class="p">,</span>
        <span class="n">num_filters</span><span class="o">=</span><span class="n">num_filters</span><span class="p">,</span>
        <span class="n">filter_size</span><span class="o">=</span><span class="n">filter_size</span><span class="p">,</span>
        <span class="n">stride</span><span class="o">=</span><span class="n">stride</span><span class="p">,</span>
        <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="n">filter_size</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">//</span> <span class="mi">2</span><span class="p">,</span>
        <span class="n">groups</span><span class="o">=</span><span class="n">groups</span><span class="p">,</span>
        <span class="n">act</span><span class="o">=</span><span class="bp">None</span><span class="p">,</span>
        <span class="n">param_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_weights&quot;</span><span class="p">),</span>
        <span class="n">bias_attr</span><span class="o">=</span><span class="bp">False</span><span class="p">,</span>
        <span class="n">name</span><span class="o">=</span><span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_out&quot;</span><span class="p">)</span>
    <span class="n">bn_name</span> <span class="o">=</span> <span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;_bn&quot;</span>
    <span class="k">return</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">batch_norm</span><span class="p">(</span>
        <span class="nb">input</span><span class="o">=</span><span class="n">conv</span><span class="p">,</span>
        <span class="n">act</span><span class="o">=</span><span class="n">act</span><span class="p">,</span>
        <span class="n">name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_output&#39;</span><span class="p">,</span>
        <span class="n">param_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_scale&#39;</span><span class="p">),</span>
        <span class="n">bias_attr</span><span class="o">=</span><span class="n">ParamAttr</span><span class="p">(</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_offset&#39;</span><span class="p">),</span>
        <span class="n">moving_mean_name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_mean&#39;</span><span class="p">,</span>
        <span class="n">moving_variance_name</span><span class="o">=</span><span class="n">bn_name</span> <span class="o">+</span> <span class="s1">&#39;_variance&#39;</span><span class="p">,</span> <span class="p">)</span>

<span class="n">main_program</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Program</span><span class="p">()</span>
<span class="n">startup_program</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Program</span><span class="p">()</span>
<span class="c1">#   X       X              O       X              O</span>
<span class="c1"># conv1--&gt;conv2--&gt;sum1--&gt;conv3--&gt;conv4--&gt;sum2--&gt;conv5--&gt;conv6</span>
<span class="c1">#     |            ^ |                    ^</span>
<span class="c1">#     |____________| |____________________|</span>
<span class="c1">#</span>
<span class="c1"># X: prune output channels</span>
<span class="c1"># O: prune input channels</span>
<span class="n">image_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span><span class="mi">28</span><span class="p">,</span><span class="mi">28</span><span class="p">]</span>
<span class="k">with</span> <span class="n">fluid</span><span class="o">.</span><span class="n">program_guard</span><span class="p">(</span><span class="n">main_program</span><span class="p">,</span> <span class="n">startup_program</span><span class="p">):</span>
    <span class="n">image</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">&#39;image&#39;</span><span class="p">,</span> <span class="kp">shape</span><span class="o">=</span><span class="p">[</span><span class="bp">None</span><span class="p">]</span><span class="o">+</span><span class="n">image_shape</span><span class="p">,</span> <span class="kp">dtype</span><span class="o">=</span><span class="s1">&#39;float32&#39;</span><span class="p">)</span>
    <span class="n">label</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">data</span><span class="p">(</span><span class="n">name</span><span class="o">=</span><span class="s1">&#39;label&#39;</span><span class="p">,</span> <span class="kp">shape</span><span class="o">=</span><span class="p">[</span><span class="bp">None</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="kp">dtype</span><span class="o">=</span><span class="s1">&#39;int64&#39;</span><span class="p">)</span>  
    <span class="n">conv1</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">image</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv1&quot;</span><span class="p">)</span>
    <span class="n">conv2</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv1</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv2&quot;</span><span class="p">)</span>
    <span class="n">sum1</span> <span class="o">=</span> <span class="n">conv1</span> <span class="o">+</span> <span class="n">conv2</span>
    <span class="n">conv3</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">sum1</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv3&quot;</span><span class="p">)</span>
    <span class="n">conv4</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv3</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv4&quot;</span><span class="p">)</span>
    <span class="n">sum2</span> <span class="o">=</span> <span class="n">conv4</span> <span class="o">+</span> <span class="n">sum1</span>
    <span class="n">conv5</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">sum2</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv5&quot;</span><span class="p">)</span>
    <span class="n">conv6</span> <span class="o">=</span> <span class="n">conv_bn_layer</span><span class="p">(</span><span class="n">conv5</span><span class="p">,</span> <span class="mi">8</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="s2">&quot;conv6&quot;</span><span class="p">)</span>
    <span class="n">out</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="n">conv6</span><span class="p">,</span> <span class="kp">size</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">act</span><span class="o">=</span><span class="s2">&quot;softmax&quot;</span><span class="p">)</span>
<span class="c1">#    cost = fluid.layers.cross_entropy(input=out, label=label)</span>
<span class="c1">#    avg_cost = fluid.layers.mean(x=cost)</span>
    <span class="n">acc_top1</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">accuracy</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">out</span><span class="p">,</span> <span class="n">label</span><span class="o">=</span><span class="n">label</span><span class="p">,</span> <span class="n">k</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
<span class="c1">#    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)</span>


<span class="kp">place</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">CPUPlace</span><span class="p">()</span>
<span class="n">exe</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">Executor</span><span class="p">(</span><span class="kp">place</span><span class="p">)</span>
<span class="n">exe</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">startup_program</span><span class="p">)</span>

<span class="n">val_reader</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">.</span><span class="n">batch</span><span class="p">(</span><span class="n">reader</span><span class="o">.</span><span class="kp">test</span><span class="p">(),</span> <span class="n">batch_size</span><span class="o">=</span><span class="mi">128</span><span class="p">)</span>
<span class="n">val_feeder</span> <span class="o">=</span> <span class="n">feeder</span> <span class="o">=</span> <span class="n">fluid</span><span class="o">.</span><span class="n">DataFeeder</span><span class="p">(</span>
        <span class="p">[</span><span class="n">image</span><span class="p">,</span> <span class="n">label</span><span class="p">],</span> <span class="kp">place</span><span class="p">,</span> <span class="n">program</span><span class="o">=</span><span class="n">main_program</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">eval_func</span><span class="p">(</span><span class="n">program</span><span class="p">):</span>

    <span class="n">acc_top1_ns</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="k">for</span> <span class="n">data</span> <span class="ow">in</span> <span class="n">val_reader</span><span class="p">():</span>
        <span class="n">acc_top1_n</span> <span class="o">=</span> <span class="n">exe</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">program</span><span class="p">,</span>
                             <span class="n">feed</span><span class="o">=</span><span class="n">val_feeder</span><span class="o">.</span><span class="n">feed</span><span class="p">(</span><span class="n">data</span><span class="p">),</span>
                             <span class="n">fetch_list</span><span class="o">=</span><span class="p">[</span><span class="n">acc_top1</span><span class="o">.</span><span class="n">name</span><span class="p">])</span>
        <span class="n">acc_top1_ns</span><span class="o">.</span><span class="kp">append</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="kp">mean</span><span class="p">(</span><span class="n">acc_top1_n</span><span class="p">))</span>
    <span class="k">return</span> <span class="n">np</span><span class="o">.</span><span class="kp">mean</span><span class="p">(</span><span class="n">acc_top1_ns</span><span class="p">)</span>
<span class="n">param_names</span> <span class="o">=</span> <span class="p">[]</span>
<span class="k">for</span> <span class="n">param</span> <span class="ow">in</span> <span class="n">main_program</span><span class="o">.</span><span class="n">global_block</span><span class="p">()</span><span class="o">.</span><span class="n">all_parameters</span><span class="p">():</span>
    <span class="k">if</span> <span class="s2">&quot;weights&quot;</span> <span class="ow">in</span> <span class="n">param</span><span class="o">.</span><span class="n">name</span><span class="p">:</span>
        <span class="n">param_names</span><span class="o">.</span><span class="kp">append</span><span class="p">(</span><span class="n">param</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
<span class="n">sensitivities</span> <span class="o">=</span> <span class="n">sensitivity</span><span class="p">(</span><span class="n">main_program</span><span class="p">,</span>
                            <span class="kp">place</span><span class="p">,</span>
                            <span class="n">param_names</span><span class="p">,</span>
                            <span class="n">eval_func</span><span class="p">,</span>
                            <span class="n">sensitivities_file</span><span class="o">=</span><span class="s2">&quot;./sensitive.data&quot;</span><span class="p">,</span>
                            <span class="n">pruned_ratios</span><span class="o">=</span><span class="p">[</span><span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.2</span><span class="p">,</span> <span class="mf">0.3</span><span class="p">])</span>
<span class="k">print</span><span class="p">(</span><span class="n">sensitivities</span><span class="p">)</span>
</pre></div>
</td></tr></table>

<h2 id="merge_sensitive">merge_sensitive<a class="headerlink" href="#merge_sensitive" title="Permanent link">#</a></h2>
<dl>
<dt>paddleslim.prune.merge_sensitive(sensitivities)<a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L161">source code</a></dt>
<dd>
<p>Merges multiple pieces of sensitivity information.</p>
</dd>
</dl>
<p>Parameters:</p>
<ul>
<li><strong>sensitivities(list&lt;dict&gt; | list&lt;str&gt;)</strong> - The sensitivity information to merge; either a list of dicts or a list of paths to files holding sensitivity information.</li>
</ul>
<p>Returns:</p>
<ul>
<li><strong>sensitivities(dict)</strong> - The merged sensitivity information, in the following format:</li>
</ul>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span>1
2
3
4
5
6
7
8
9</pre></div></td><td class="code"><div class="codehilite"><pre><span></span><span class="err">{</span><span class="ss">&quot;weight_0&quot;</span><span class="p">:</span>
   <span class="err">{</span><span class="mi">0</span><span class="p">.</span><span class="mi">1</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">22</span><span class="p">,</span>
    <span class="mi">0</span><span class="p">.</span><span class="mi">2</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">33</span>
   <span class="err">}</span><span class="p">,</span>
 <span class="ss">&quot;weight_1&quot;</span><span class="p">:</span>
   <span class="err">{</span><span class="mi">0</span><span class="p">.</span><span class="mi">1</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">21</span><span class="p">,</span>
    <span class="mi">0</span><span class="p">.</span><span class="mi">2</span><span class="p">:</span> <span class="mi">0</span><span class="p">.</span><span class="mi">4</span>
   <span class="err">}</span>
<span class="err">}</span>
</pre></div>
</td></tr></table>

<p>Here, <code>weight_0</code> is the name of a convolution-layer parameter; in sensitivities['weight_0'], each <code>key</code> is a pruning ratio and the corresponding <code>value</code> is the relative accuracy loss.</p>
<p>Example:</p>
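<p>A minimal sketch, assuming two sensitivity files produced by separate <code>sensitivity</code> runs; the file names below are illustrative, not part of the API:</p>
<pre><code class="python">from paddleslim.prune import merge_sensitive

# Hypothetical files written by two separate sensitivity() runs,
# e.g. each covering a different subset of pruned_ratios.
sensitivities = merge_sensitive(
    ["./sensitivities_0.data", "./sensitivities_1.data"])
print(sensitivities)
</code></pre>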
<h2 id="load_sensitivities">load_sensitivities<a class="headerlink" href="#load_sensitivities" title="Permanent link">#</a></h2>
<dl>
<dt>paddleslim.prune.load_sensitivities(sensitivities_file)<a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L184">source code</a></dt>
<dd>
<p>Loads sensitivity information from a file.</p>
</dd>
</dl>
<p>Parameters:</p>
<ul>
<li><strong>sensitivities_file(str)</strong> - The local file that stores the sensitivity information.</li>
</ul>
<p>Returns:</p>
<ul>
<li><strong>sensitivities(dict)</strong> - The sensitivity information.</li>
</ul>
<p>Example:</p>
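<p>A minimal sketch, assuming <code>./sensitive.data</code> was written by a previous <code>sensitivity</code> call, as in the sensitivity example above:</p>
<pre><code class="python">from paddleslim.prune import load_sensitivities

# Load the sensitivity dict saved by an earlier sensitivity() run.
sensitivities = load_sensitivities("./sensitive.data")
for param_name, losses in sensitivities.items():
    print("param: {}; sensitivities: {}".format(param_name, losses))
</code></pre>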
<h2 id="get_ratios_by_loss">get_ratios_by_loss<a class="headerlink" href="#get_ratios_by_loss" title="Permanent link">#</a></h2>
<dl>
<dt>paddleslim.prune.get_ratios_by_loss(sensitivities, loss)<a href="https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L206">source code</a></dt>
<dd>
<p>Computes a set of pruning ratios from sensitivity information and an accuracy-loss threshold. For a parameter <code>w</code>, the chosen pruning ratio is the largest ratio that keeps the accuracy loss below <code>loss</code>.</p>
</dd>
</dl>
<p>Parameters:</p>
<ul>
<li>
<p><strong>sensitivities(dict)</strong> - The sensitivity information.</p>
</li>
<li>
<p><strong>loss</strong> - The accuracy-loss threshold.</p>
</li>
</ul>
<p>Returns:</p>
<ul>
<li><strong>ratios(dict)</strong> - A set of pruning ratios. Each <code>key</code> is the name of a parameter to prune, and the corresponding <code>value</code> is that parameter's pruning ratio. See the example below.</li>
</ul>
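<p>Example:</p>
<p>A minimal sketch; the sensitivity values below are made up for illustration, and in practice <code>sensitivities</code> would come from <code>sensitivity</code>, <code>merge_sensitive</code> or <code>load_sensitivities</code>:</p>
<pre><code class="python">from paddleslim.prune import get_ratios_by_loss

# Hypothetical sensitivity information: {param_name: {pruning_ratio: accuracy_loss}}.
sensitivities = {
    "conv4_weights": {0.1: 0.01, 0.2: 0.04, 0.3: 0.20},
    "conv5_weights": {0.1: 0.02, 0.2: 0.10, 0.3: 0.40},
}
# For each parameter, pick the largest ratio whose accuracy loss stays below 0.05.
ratios = get_ratios_by_loss(sensitivities, 0.05)
print(ratios)  # a dict mapping each parameter name to its pruning ratio
</code></pre>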
              
            </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../analysis_api/" class="btn btn-neutral float-right" title="模型分析">Next <span class="icon icon-circle-arrow-right"></span></a>
      
      
        <a href="../quantization_api/" class="btn btn-neutral" title="量化"><span class="icon icon-circle-arrow-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <!-- Copyright etc -->
    
  </div>

  Built with <a href="http://www.mkdocs.org">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
      
        </div>
      </div>

    </section>

  </div>

  <div class="rst-versions" role="note" style="cursor: pointer">
    <span class="rst-current-version" data-toggle="rst-current-version">
      
          <a href="https://github.com/PaddlePaddle/PaddleSlim/" class="fa fa-github" style="float: left; color: #fcfcfc"> GitHub</a>
      
      
        <span><a href="../quantization_api/" style="color: #fcfcfc;">&laquo; Previous</a></span>
      
      
        <span style="margin-left: 15px"><a href="../analysis_api/" style="color: #fcfcfc">Next &raquo;</a></span>
      
    </span>
</div>
    <script>var base_url = '../..';</script>
    <script src="../../js/theme.js" defer></script>
      <script src="../../mathjax-config.js" defer></script>
      <script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML" defer></script>
      <script src="../../search/main.js" defer></script>

</body>
</html>