

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>How to write a new Operator &mdash; PaddlePaddle Documentation</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  

  
  
        <link rel="index" title="索引"
              href="../../genindex.html"/>
        <link rel="search" title="搜索" href="../../search.html"/>
    <link rel="top" title="PaddlePaddle  文档" href="../../index.html"/> 

  <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/css/override.css" type="text/css" />
  <script>
  var _hmt = _hmt || [];
  (function() {
    var hm = document.createElement("script");
    hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
    var s = document.getElementsByTagName("script")[0]; 
    s.parentNode.insertBefore(hm, s);
  })();
  </script>

  

  
  <script src="../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  
  <header class="site-header">
    <div class="site-logo">
      <a href="/"><img src="../../_static/images/PP_w.png"></a>
    </div>
    <div class="site-nav-links">
      <div class="site-menu">
        <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a>
        <div class="language-switcher dropdown">
          <a type="button" data-toggle="dropdown">
            <span>English</span>
            <i class="fa fa-angle-up"></i>
            <i class="fa fa-angle-down"></i>
          </a>
          <ul class="dropdown-menu">
            <li><a href="/doc_cn">中文</a></li>
            <li><a href="/doc">English</a></li>
          </ul>
        </div>
        <ul class="site-page-links">
          <li><a href="/">Home</a></li>
        </ul>
      </div>
      <div class="doc-module">
        
        <ul>
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a></li>
<li class="toctree-l1"><a class="reference internal" href="../index_cn.html">进阶指南</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../api/index_cn.html">API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a></li>
</ul>

        
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>        
      </div>
    </div>
  </header>
  
  <div class="main-content-wrap">

    
    <nav class="doc-menu-vertical" role="navigation">
        
          
          <ul>
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/build_and_install/index_cn.html">安装与编译</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../getstarted/build_and_install/docker_install_cn.html">PaddlePaddle的Docker容器使用方式</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../getstarted/build_and_install/cmake/build_from_source_cn.html">PaddlePaddle的编译选项</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../index_cn.html">进阶指南</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../usage/cmd_parameter/index_cn.html">设置命令行参数</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/use_case_cn.html">使用案例</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/arguments_cn.html">参数概述</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/detail_introduction_cn.html">细节描述</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../usage/cluster/cluster_train_cn.html">运行分布式训练</a></li>
<li class="toctree-l2"><a class="reference internal" href="../usage/k8s/k8s_basis_cn.html">Kubernetes 简介</a></li>
<li class="toctree-l2"><a class="reference internal" href="../usage/k8s/k8s_cn.html">Kubernetes单机训练</a></li>
<li class="toctree-l2"><a class="reference internal" href="../usage/k8s/k8s_distributed_cn.html">Kubernetes分布式训练</a></li>
<li class="toctree-l2"><a class="reference internal" href="build_cn.html">编译PaddlePaddle和运行单元测试</a></li>
<li class="toctree-l2"><a class="reference internal" href="write_docs_cn.html">如何贡献/修改文档</a></li>
<li class="toctree-l2"><a class="reference internal" href="contribute_to_paddle_cn.html">如何贡献代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="../deep_model/rnn/index_cn.html">RNN相关模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/rnn_config_cn.html">RNN配置</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/recurrent_group_cn.html">Recurrent Group教程</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../optimization/gpu_profiling_cn.html">GPU性能分析与调优</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../api/index_cn.html">API</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/model_configs.html">模型配置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/activation.html">Activation</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/layer.html">Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/evaluators.html">Evaluators</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/optimizer.html">Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/pooling.html">Pooling</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/networks.html">Networks</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/attr.html">Parameter Attribute</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/data.html">数据访问</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/run_logic.html">训练与应用</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a></li>
</ul>

        
    </nav>
    
    <section class="doc-content-wrap">

      

 







<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
      
    <li>How to write a new Operator</li>
  </ul>
</div>
      
      <div class="wy-nav-content" id="doc-content">
        <div class="rst-content">
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="operator">
<span id="operator"></span><h1>How to write a new Operator<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h1>
<ul class="simple">
<li><a class="reference external" href="#概念简介">Concepts</a></li>
<li><a class="reference external" href="#实现C++类">Implementing the C++ classes</a><ul>
<li><a class="reference external" href="#定义ProtoMaker类">Defining the ProtoMaker class</a></li>
<li><a class="reference external" href="#定义Operator类">Defining the Operator class</a></li>
<li><a class="reference external" href="#定义OpKernel类">Defining the OpKernel class</a></li>
<li><a class="reference external" href="#注册Operator">Registering the Operator</a></li>
<li><a class="reference external" href="#编译">Compiling</a></li>
</ul>
</li>
<li><a class="reference external" href="#绑定Python">Python binding</a></li>
<li><a class="reference external" href="#实现单元测试">Writing unit tests</a><ul>
<li><a class="reference external" href="#前向Operator单测">Forward operator unit test</a></li>
<li><a class="reference external" href="#反向Operator单测">Backward operator unit test</a></li>
<li><a class="reference external" href="#编译和执行">Compiling and running</a></li>
</ul>
</li>
</ul>
<div class="section" id="">
<span id="id1"></span><h2>Concepts<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>This section briefly introduces the base classes involved; for details, please refer to the design documents.</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OperatorBase</span></code>: the base class of operators (Op for short).</li>
<li><code class="docutils literal"><span class="pre">framework::OpKernel</span></code>: the base class of an Op&#8217;s computation functions, which are called kernels.</li>
<li><code class="docutils literal"><span class="pre">framework::OperatorWithKernel</span></code>: inherits from OperatorBase; an Op that has computation functions is said to have kernels.</li>
<li><code class="docutils literal"><span class="pre">class</span> <span class="pre">OpProtoAndCheckerMaker</span></code>: describes the Op&#8217;s inputs, outputs, attributes, and documentation; mainly used to generate the Python API.</li>
</ul>
<p>Depending on whether it has kernels, an Op falls into one of two kinds: Ops with kernels and Ops without kernels. The former are defined by inheriting from <code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>, the latter by inheriting from <code class="docutils literal"><span class="pre">OperatorBase</span></code>. This tutorial mainly explains how to write an Op with kernels. In short, an Op consists of the following pieces:</p>
<table border="1" class="docutils">
<thead valign="bottom">
<tr><th class="head">Content</th><th class="head">Where it is defined</th></tr>
</thead>
<tbody valign="top">
<tr><td>OpProtoMaker definition</td><td><code class="docutils literal"><span class="pre">.cc</span></code> file; a backward Op does not need an OpProtoMaker</td></tr>
<tr><td>Op definition</td><td><code class="docutils literal"><span class="pre">.cc</span></code> file</td></tr>
<tr><td>Kernel implementation</td><td>a kernel shared by CPU and GPU lives in the <code class="docutils literal"><span class="pre">.h</span></code> file; otherwise the CPU kernel goes in the <code class="docutils literal"><span class="pre">.cc</span></code> file and the GPU kernel in the <code class="docutils literal"><span class="pre">.cu</span></code> file</td></tr>
<tr><td>Op registration</td><td>Op registration goes in the <code class="docutils literal"><span class="pre">.cc</span></code> file; the CPU kernel is registered in the <code class="docutils literal"><span class="pre">.cc</span></code> file, the GPU kernel in the <code class="docutils literal"><span class="pre">.cu</span></code> file</td></tr>
</tbody>
</table>
<p>New operators are added under the directory <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators">paddle/operators</a>, in files named <code class="docutils literal"><span class="pre">*_op.h</span></code> (if any), <code class="docutils literal"><span class="pre">*_op.cc</span></code> and <code class="docutils literal"><span class="pre">*_op.cu</span></code> (if any). <strong>The build system automatically builds the op and its Python extension based on these file names.</strong></p>
<p>The rest of this tutorial uses the matrix multiplication operator, <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc">MulOp</a>, as an example of how to write an operator with kernels.</p>
</div>
<div class="section" id="c">
<span id="c"></span><h2>Implementing the C++ classes<a class="headerlink" href="#c" title="Permalink to this headline"></a></h2>
<div class="section" id="protomaker">
<span id="protomaker"></span><h3>1. Defining the ProtoMaker class<a class="headerlink" href="#protomaker" title="Permalink to this headline"></a></h3>
<p>The formula for matrix multiplication is $Out = X * Y$, so the computation takes two inputs and produces one output.</p>
<p>First, define a <code class="docutils literal"><span class="pre">ProtoMaker</span></code> that describes the Op&#8217;s inputs and outputs and adds documentation:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">MulOpMaker</span><span class="p">(</span><span class="n">framework</span><span class="o">::</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;The first input of mul op&quot;</span><span class="p">);</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">,</span> <span class="s">&quot;The second input of mul op&quot;</span><span class="p">);</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;The output of mul op&quot;</span><span class="p">);</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s"></span>
<span class="s">Two Element Mul Operator.</span>
<span class="s">The equation is: Out = X * Y</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43"><code class="docutils literal"><span class="pre">MulOpMaker</span></code></a> inherits from <code class="docutils literal"><span class="pre">framework::OpProtoAndCheckerMaker</span></code>, and its constructor takes two arguments:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OpProto</span></code> : stores the Op&#8217;s inputs, outputs, and attribute definitions, which are used to generate the Python API.</li>
<li><code class="docutils literal"><span class="pre">framework::OpAttrChecker</span></code> : checks that the attribute values are valid.</li>
</ul>
<p>Inside the constructor, <code class="docutils literal"><span class="pre">AddInput</span></code> adds an input, <code class="docutils literal"><span class="pre">AddOutput</span></code> adds an output, and <code class="docutils literal"><span class="pre">AddComment</span></code> adds the Op&#8217;s documentation; each call records the corresponding content into <code class="docutils literal"><span class="pre">OpProto</span></code>.</p>
<p>The code above adds two inputs <code class="docutils literal"><span class="pre">X</span></code> and <code class="docutils literal"><span class="pre">Y</span></code> and one output <code class="docutils literal"><span class="pre">Out</span></code> to <code class="docutils literal"><span class="pre">MulOp</span></code> and documents each of them; please follow the naming conventions.</p>
<p>Take <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37"><code class="docutils literal"><span class="pre">ScaleOp</span></code></a> as another example:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">AttrType</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">ScaleOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">ScaleOpMaker</span><span class="p">(</span><span class="n">framework</span><span class="o">::</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;The input tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;The output tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s">Scale operator</span>
<span class="s">The equation is: Out = scale*X</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
    <span class="n">AddAttr</span><span class="o">&lt;</span><span class="n">AttrType</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;scale&quot;</span><span class="p">,</span> <span class="s">&quot;scale of scale operator.&quot;</span><span class="p">).</span><span class="n">SetDefault</span><span class="p">(</span><span class="mf">1.0</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>This example differs from the previous one in two ways:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">AddInput(&quot;X&quot;,&quot;...&quot;).NotInGradient()</span></code> : the input <code class="docutils literal"><span class="pre">X</span></code> does not take part in the computation of <code class="docutils literal"><span class="pre">ScaleOp</span></code>&#8217;s gradient Op. If one of an Op&#8217;s inputs is not needed for the backward computation, mark it explicitly by calling <code class="docutils literal"><span class="pre">.NotInGradient()</span></code>.</li>
<li><code class="docutils literal"><span class="pre">AddAttr&lt;AttrType&gt;(&quot;scale&quot;,</span> <span class="pre">&quot;...&quot;).SetDefault(1.0);</span></code> : adds a <code class="docutils literal"><span class="pre">scale</span></code> coefficient as an attribute and sets its default value to 1.0.</li>
</ul>
</div>
<div class="section" id="operator">
<span id="id2"></span><h3>2. Defining the Operator class<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h3>
<p>The following code defines <code class="docutils literal"><span class="pre">MulOp</span></code>:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOp</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>

 <span class="k">protected</span><span class="o">:</span>
  <span class="kt">void</span> <span class="n">InferShape</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">InferShapeContext</span> <span class="o">&amp;</span><span class="n">ctx</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
    <span class="k">auto</span> <span class="n">dim0</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="k">auto</span> <span class="n">dim1</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim0</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input X(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim1</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input Y(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span>
        <span class="n">dim0</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
        <span class="s">&quot;First matrix&#39;s width must be equal with second matrix&#39;s height.&quot;</span><span class="p">);</span>
    <span class="n">ctx</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">Resize</span><span class="p">({</span><span class="n">dim0</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">1</span><span class="p">]});</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22"><code class="docutils literal"><span class="pre">MulOp</span></code></a> inherits from <code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>. Its <code class="docutils literal"><span class="pre">public</span></code> member is:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>
</pre></div>
</div>
<p>This statement reuses the constructor of the base class <code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>; it could equivalently be written as:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="n">MulOp</span><span class="p">(</span><span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span> <span class="o">&amp;</span><span class="n">type</span><span class="p">,</span> <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">inputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">outputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">AttributeMap</span> <span class="o">&amp;</span><span class="n">attrs</span><span class="p">)</span>
  <span class="o">:</span> <span class="n">OperatorWithKernel</span><span class="p">(</span><span class="n">type</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">outputs</span><span class="p">,</span> <span class="n">attrs</span><span class="p">)</span> <span class="p">{}</span>
</pre></div>
</div>
<p>The Op also has to override the <code class="docutils literal"><span class="pre">InferShape</span></code> method. <code class="docutils literal"><span class="pre">InferShape</span></code> is a const member function, so it must not modify the Op&#8217;s members. Its parameter is <code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::InferShapeContext</span> <span class="pre">&amp;ctx</span></code>, through which the inputs, outputs, and attributes can be accessed. Its two duties (illustrated for a second operator in the sketch after this list) are to:</p>
<ul class="simple">
<li>check early and fail fast: verify that the input dimensions, types, and so on are valid;</li>
<li>set the shapes of the output tensors.</li>
</ul>
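<p>As a minimal sketch of those two duties, consider a hypothetical row-wise add operator that adds a bias vector <code class="docutils literal"><span class="pre">b</span></code> to every row of a matrix <code class="docutils literal"><span class="pre">X</span></code>; its <code class="docutils literal"><span class="pre">InferShape</span></code> might look as follows. The class and input names are purely illustrative, not taken from the real code base:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>class RowwiseAddOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &amp;ctx) const override {
    auto dim0 = ctx.Input&lt;Tensor&gt;(&quot;X&quot;)-&gt;dims();
    auto dim1 = ctx.Input&lt;Tensor&gt;(&quot;b&quot;)-&gt;dims();
    // 1. Check early, fail fast: X must be a matrix and b a vector whose
    //    length equals the number of columns of X.
    PADDLE_ENFORCE_EQ(dim0.size(), 2, &quot;input X should be a 2-d tensor&quot;);
    PADDLE_ENFORCE_EQ(dim1.size(), 1, &quot;input b should be a 1-d tensor&quot;);
    PADDLE_ENFORCE_EQ(dim0[1], dim1[0],
                      &quot;the width of X must equal the length of b&quot;);
    // 2. Set the output shape: Out has the same shape as X.
    ctx.Output&lt;Tensor&gt;(&quot;Out&quot;)-&gt;Resize(dim0);
  }
};
</pre></div>
</div>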
<p>The <code class="docutils literal"><span class="pre">OpProtoMaker</span></code> and <code class="docutils literal"><span class="pre">Op</span></code> class definitions normally live in the <code class="docutils literal"><span class="pre">.cc</span></code> file, together with the registration code introduced below.</p>
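<p>As a rough, non-authoritative sketch of that layout (class bodies elided, registration macros as introduced in section 4), a <code class="docutils literal"><span class="pre">mul_op.cc</span></code> might be organized like this:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// mul_op.cc -- sketch of the usual file layout, class bodies elided
#include &quot;paddle/operators/mul_op.h&quot;  // kernel shared by CPU and GPU

namespace paddle {
namespace operators {

class MulOp : public framework::OperatorWithKernel { /* ... */ };
class MulOpMaker : public framework::OpProtoAndCheckerMaker { /* ... */ };
class MulOpGrad : public framework::OperatorWithKernel { /* ... */ };

}  // namespace operators
}  // namespace paddle

// registration, explained in section 4
namespace ops = paddle::operators;
REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel&lt;paddle::platform::CPUPlace, float&gt;);
REGISTER_OP_CPU_KERNEL(mul_grad,
                       ops::MulGradKernel&lt;paddle::platform::CPUPlace, float&gt;);
</pre></div>
</div>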
</div>
<div class="section" id="opkernel">
<span id="opkernel"></span><h3>3. Defining the OpKernel class<a class="headerlink" href="#opkernel" title="Permalink to this headline"></a></h3>
<p><code class="docutils literal"><span class="pre">MulKernel</span></code> inherits from <code class="docutils literal"><span class="pre">framework::OpKernel</span></code> and takes two template parameters:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">Place</span></code>: the device type. Use this template parameter when different devices (CPU, GPU) share one kernel; omit it when they do not. An example of a kernel that is not shared is <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a>.</li>
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">T</span></code> : the data type, such as <code class="docutils literal"><span class="pre">float</span></code> or <code class="docutils literal"><span class="pre">double</span></code>.</li>
</ul>
<p><code class="docutils literal"><span class="pre">MulKernel</span></code> must override the <code class="docutils literal"><span class="pre">Compute</span></code> method:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">Compute</span></code> takes a single argument, <code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::ExecutionContext&amp;</span> <span class="pre">context</span></code>.</li>
<li>Compared with <code class="docutils literal"><span class="pre">InferShapeContext</span></code>, <code class="docutils literal"><span class="pre">ExecutionContext</span></code> additionally carries the device type; inputs, outputs, and attributes are available in the same way.</li>
<li>The kernel&#8217;s actual computation is implemented inside <code class="docutils literal"><span class="pre">Compute</span></code>.</li>
</ul>
<p>Here is the implementation of <code class="docutils literal"><span class="pre">MulKernel</span></code>&#8217;s <code class="docutils literal"><span class="pre">Compute</span></code>:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">Place</span><span class="p">,</span> <span class="k">typename</span> <span class="n">T</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">MulKernel</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpKernel</span> <span class="p">{</span>
<span class="k">public</span><span class="o">:</span>
<span class="kt">void</span> <span class="n">Compute</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">ExecutionContext</span><span class="o">&amp;</span> <span class="n">context</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">X</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Y</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Z</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">);</span>
  <span class="n">Z</span><span class="o">-&gt;</span><span class="n">mutable_data</span><span class="o">&lt;</span><span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="n">context</span><span class="p">.</span><span class="n">GetPlace</span><span class="p">());</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">device_context</span> <span class="o">=</span>
      <span class="k">const_cast</span><span class="o">&lt;</span><span class="n">platform</span><span class="o">::</span><span class="n">DeviceContext</span><span class="o">*&gt;</span><span class="p">(</span><span class="n">context</span><span class="p">.</span><span class="n">device_context_</span><span class="p">);</span>
  <span class="n">math</span><span class="o">::</span><span class="n">matmul</span><span class="o">&lt;</span><span class="n">Place</span><span class="p">,</span> <span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="o">*</span><span class="n">X</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="o">*</span><span class="n">Y</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Z</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">device_context</span><span class="p">);</span>
<span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>Note that <strong>different devices (CPU, GPU) share one Op definition; whether they can also share the same <code class="docutils literal"><span class="pre">OpKernel</span></code> depends on whether the functions called from <code class="docutils literal"><span class="pre">Compute</span></code> support both devices.</strong></p>
<p>The CPU and GPU implementations of <code class="docutils literal"><span class="pre">MulOp</span></code> share one <code class="docutils literal"><span class="pre">Kernel</span></code>; for an example where the <code class="docutils literal"><span class="pre">OpKernel</span></code> is not shared, see <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a>.</p>
<p>To keep kernels simple to write and to share code between CPU and GPU, <code class="docutils literal"><span class="pre">Compute</span></code> is usually implemented with the Eigen unsupported Tensor module. For how to use Eigen inside PaddlePaddle, see the <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md">usage guide</a>.</p>
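<p>For instance, the kernel of the <code class="docutils literal"><span class="pre">ScaleOp</span></code> shown earlier could be written with Eigen roughly as in the sketch below. This is only a sketch: it assumes the <code class="docutils literal"><span class="pre">EigenVector::Flatten</span></code> and <code class="docutils literal"><span class="pre">GetEigenDevice</span></code> helpers described in the Eigen usage guide, and the attribute access mirrors the style of the code on this page rather than any exact file.</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>template &lt;typename Place, typename T, typename AttrType = T&gt;
class ScaleKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext&amp; context) const override {
    auto* in = context.Input&lt;framework::Tensor&gt;(&quot;X&quot;);
    auto* out = context.Output&lt;framework::Tensor&gt;(&quot;Out&quot;);
    out-&gt;mutable_data&lt;T&gt;(context.GetPlace());

    auto scale = static_cast&lt;T&gt;(context.op_.GetAttr&lt;AttrType&gt;(&quot;scale&quot;));

    // Flatten the tensors into 1-D Eigen vectors and evaluate the
    // expression on the device (CPU or GPU) selected by Place.
    auto eigen_in = framework::EigenVector&lt;T&gt;::Flatten(*in);
    auto eigen_out = framework::EigenVector&lt;T&gt;::Flatten(*out);
    auto&amp; place = context.GetEigenDevice&lt;Place&gt;();
    eigen_out.device(place) = scale * eigen_in;
  }
};
</pre></div>
</div>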
<p>This completes the forward Op. Next, the op and its kernels have to be registered in the <code class="docutils literal"><span class="pre">.cc</span></code> file.
The backward Op class and the backward OpKernel are defined in the same way as their forward counterparts and are not repeated here, <strong>but note that a backward Op has no <code class="docutils literal"><span class="pre">ProtoMaker</span></code></strong>; a rough sketch of the backward Op&#8217;s shape inference follows.</p>
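<p>A minimal, illustrative sketch of <code class="docutils literal"><span class="pre">MulOpGrad</span></code> is shown below: it checks the shape of the incoming output gradient and resizes each input gradient to the shape of the corresponding input. It assumes a <code class="docutils literal"><span class="pre">framework::GradVarName</span></code> helper that maps a variable name to the name of its gradient; treat the exact calls as a sketch rather than the definitive implementation.</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>class MulOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &amp;ctx) const override {
    auto x_dims = ctx.Input&lt;Tensor&gt;(&quot;X&quot;)-&gt;dims();
    auto y_dims = ctx.Input&lt;Tensor&gt;(&quot;Y&quot;)-&gt;dims();
    auto out_dims = ctx.Input&lt;Tensor&gt;(framework::GradVarName(&quot;Out&quot;))-&gt;dims();
    PADDLE_ENFORCE_EQ(x_dims[0], out_dims[0],
                      &quot;Out@GRAD&#39;s first dimension must equal X&#39;s&quot;);
    PADDLE_ENFORCE_EQ(y_dims[1], out_dims[1],
                      &quot;Out@GRAD&#39;s second dimension must equal Y&#39;s&quot;);
    // The gradient of each input has the same shape as that input.
    ctx.Output&lt;Tensor&gt;(framework::GradVarName(&quot;X&quot;))-&gt;Resize(x_dims);
    ctx.Output&lt;Tensor&gt;(framework::GradVarName(&quot;Y&quot;))-&gt;Resize(y_dims);
  }
};
</pre></div>
</div>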
</div>
<div class="section" id="operator">
<span id="id3"></span><h3>4. Registering the Operator<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h3>
<ul>
<li><p class="first">Register the forward and backward Op classes and the CPU kernels in the <code class="docutils literal"><span class="pre">.cc</span></code> file.</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOp</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpMaker</span><span class="p">,</span> <span class="n">mul_grad</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpGrad</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUPlace</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
              <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUPlace</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
<p>In the code above:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">REGISTER_OP</span></code> : registers the class <code class="docutils literal"><span class="pre">ops::MulOp</span></code> under the type name <code class="docutils literal"><span class="pre">mul</span></code>, with <code class="docutils literal"><span class="pre">ops::MulOpMaker</span></code> as its <code class="docutils literal"><span class="pre">ProtoMaker</span></code>, and registers <code class="docutils literal"><span class="pre">ops::MulOpGrad</span></code> under the type name <code class="docutils literal"><span class="pre">mul_grad</span></code>.</li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_WITHOUT_GRADIENT</span></code> : registers an Op that has no backward Op (a short example follows this list).</li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_CPU_KERNEL</span></code> : registers the <code class="docutils literal"><span class="pre">ops::MulKernel</span></code> class, specialized for <code class="docutils literal"><span class="pre">paddle::platform::CPUPlace</span></code> and <code class="docutils literal"><span class="pre">float</span></code>, and likewise registers <code class="docutils literal"><span class="pre">ops::MulGradKernel</span></code>.</li>
</ul>
</li>
</ul>
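<p>As an example of the second macro, an op that needs no backward pass could be registered roughly as follows (the op and maker names here are purely illustrative):</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>namespace ops = paddle::operators;
// Registers the op type, its Op class and its ProtoMaker, but no gradient op.
REGISTER_OP_WITHOUT_GRADIENT(my_op, ops::MyOp, ops::MyOpMaker);
</pre></div>
</div>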
<ul>
<li><p class="first">Register the GPU kernels in the <code class="docutils literal"><span class="pre">.cu</span></code> file.</p>
<ul class="simple">
<li>Note that if the GPU kernel is implemented with the Eigen unsupported module, the macro <code class="docutils literal"><span class="pre">#define</span> <span class="pre">EIGEN_USE_GPU</span></code> must be added at the top of the <code class="docutils literal"><span class="pre">.cu</span></code> file, for example:</li>
</ul>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="c1">// if use Eigen unsupported module before include head files</span>
<span class="cp">#define EIGEN_USE_GPU</span>

<span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP_GPU_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">GPUPlace</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_GPU_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
                       <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">GPUPlace</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
</li>
</ul>
</div>
<div class="section" id="">
<span id="id4"></span><h3>5. Compiling<a class="headerlink" href="#" title="Permalink to this headline"></a></h3>
<p>Run the following command to build the op:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">make</span> <span class="n">mul_op</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="python">
<span id="python"></span><h2>Python binding<a class="headerlink" href="#python" title="Permalink to this headline"></a></h2>
<p>The build system automatically generates the Python binding for a newly added op and links it into the resulting library.</p>
</div>
<div class="section" id="">
<span id="id5"></span><h2>Writing unit tests<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>The unit tests compare the forward Op&#8217;s CPU and GPU implementations, compare the backward Op&#8217;s CPU and GPU implementations, and check the backward Op&#8217;s gradients numerically. The following walks through the <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py">unit tests for <code class="docutils literal"><span class="pre">MulOp</span></code></a>.</p>
<div class="section" id="operator">
<span id="id6"></span><h3>Forward operator unit test<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h3>
<p>The forward Op&#8217;s unit test inherits from <code class="docutils literal"><span class="pre">unittest.TestCase</span></code> and declares the metaclass <code class="docutils literal"><span class="pre">__metaclass__</span> <span class="pre">=</span> <span class="pre">OpTestMeta</span></code>; the concrete checks are carried out inside <code class="docutils literal"><span class="pre">OpTestMeta</span></code>. Testing a forward operator involves:</p>
<ol class="simple">
<li>defining the inputs, outputs, and relevant attributes in the <code class="docutils literal"><span class="pre">setUp</span></code> method;</li>
<li>generating random input data;</li>
<li>implementing the same computation as the forward operator in Python, and comparing its result with the operator&#8217;s forward output.</li>
</ol>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">unittest</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">gradient_checker</span> <span class="kn">import</span> <span class="n">GradientChecker</span><span class="p">,</span> <span class="n">create_op</span>
<span class="kn">from</span> <span class="nn">op_test_util</span> <span class="kn">import</span> <span class="n">OpTestMeta</span>

<span class="k">class</span> <span class="nc">TestMulOp</span><span class="p">(</span><span class="n">unittest</span><span class="o">.</span><span class="n">TestCase</span><span class="p">):</span>
    <span class="vm">__metaclass__</span> <span class="o">=</span> <span class="n">OpTestMeta</span>

    <span class="k">def</span> <span class="nf">setUp</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">type</span> <span class="o">=</span> <span class="s2">&quot;mul&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span> <span class="o">=</span> <span class="p">{</span>
            <span class="s1">&#39;X&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">32</span><span class="p">,</span> <span class="mi">84</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">),</span>
            <span class="s1">&#39;Y&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">84</span><span class="p">,</span> <span class="mi">100</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>
        <span class="p">}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">outputs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;Out&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;X&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;Y&#39;</span><span class="p">])}</span>
</pre></div>
</div>
<p>The code above first imports the required packages. The key variables set up in <code class="docutils literal"><span class="pre">setUp</span></code> are:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">self.type</span> <span class="pre">=</span> <span class="pre">&quot;mul&quot;</span></code> : the op type, which must match the type name used when the operator was registered.</li>
<li><code class="docutils literal"><span class="pre">self.inputs</span></code> : the inputs, of type <code class="docutils literal"><span class="pre">numpy.array</span></code>, initialized here.</li>
<li><code class="docutils literal"><span class="pre">self.outputs</span></code> : the outputs; the same computation as the operator is performed in Python and its result is returned as the Python-side reference.</li>
</ul>
</div>
<div class="section" id="operator">
<span id="id7"></span><h3>Backward operator unit test<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h3>
<p>The backward Op&#8217;s unit test inherits from <code class="docutils literal"><span class="pre">GradientChecker</span></code>, which in turn inherits from <code class="docutils literal"><span class="pre">unittest.TestCase</span></code>, so <strong>the backward test methods must be named with a <code class="docutils literal"><span class="pre">test_</span></code> prefix</strong>.</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">TestMulGradOp</span><span class="p">(</span><span class="n">GradientChecker</span><span class="p">):</span>
    <span class="k">def</span> <span class="nf">setUp</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">op</span> <span class="o">=</span> <span class="n">create_op</span><span class="p">(</span><span class="s2">&quot;mul&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span> <span class="o">=</span> <span class="p">{</span>
            <span class="s1">&#39;X&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">32</span><span class="p">,</span> <span class="mi">84</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">),</span>
            <span class="s1">&#39;Y&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">84</span><span class="p">,</span> <span class="mi">100</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>
        <span class="p">}</span>

    <span class="k">def</span> <span class="nf">test_cpu_gpu_compare</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">compare_grad</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">op</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">test_normal</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="c1"># mul op will enlarge the relative error</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">op</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;X&quot;</span><span class="p">,</span> <span class="s2">&quot;Y&quot;</span><span class="p">],</span> <span class="s2">&quot;Out&quot;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">test_ignore_x</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">op</span><span class="p">,</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;Y&quot;</span><span class="p">],</span>
            <span class="s2">&quot;Out&quot;</span><span class="p">,</span>
            <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span>
            <span class="n">no_grad_set</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;X&quot;</span><span class="p">})</span>

    <span class="k">def</span> <span class="nf">test_ignore_y</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">op</span><span class="p">,</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;X&quot;</span><span class="p">],</span>
            <span class="s2">&quot;Out&quot;</span><span class="p">,</span>
            <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span>
            <span class="n">no_grad_set</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;Y&quot;</span><span class="p">})</span>
</pre></div>
</div>
<p>Some key points in the code above:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">create_op(&quot;mul&quot;)</span></code> creates the forward Op that corresponds to the backward Op.</li>
<li><code class="docutils literal"><span class="pre">compare_grad</span></code> compares the results computed on CPU and GPU.</li>
<li><code class="docutils literal"><span class="pre">test_normal</span></code> calls <code class="docutils literal"><span class="pre">check_grad</span></code>, which checks the correctness and stability of the gradients numerically.<ul>
<li>The first argument, <code class="docutils literal"><span class="pre">self.op</span></code> : the forward Op.</li>
<li>The second argument, <code class="docutils literal"><span class="pre">self.inputs</span></code> : the input dict, whose keys must match the names defined in the <code class="docutils literal"><span class="pre">ProtoMaker</span></code>.</li>
<li>The third argument, <code class="docutils literal"><span class="pre">[&quot;X&quot;,</span> <span class="pre">&quot;Y&quot;]</span></code> : the input variables, <code class="docutils literal"><span class="pre">X</span></code> and <code class="docutils literal"><span class="pre">Y</span></code>, whose gradients are checked.</li>
<li>The fourth argument, <code class="docutils literal"><span class="pre">&quot;Out&quot;</span></code> : the final output variable of the forward network, <code class="docutils literal"><span class="pre">Out</span></code>.</li>
</ul>
</li>
<li><code class="docutils literal"><span class="pre">test_ignore_x</span></code> and <code class="docutils literal"><span class="pre">test_ignore_y</span></code> cover the case where only one input&#8217;s gradient needs to be computed.</li>
</ul>
</div>
<div class="section" id="">
<span id="id8"></span><h3>Compiling and running the unit tests<a class="headerlink" href="#" title="Permalink to this headline"></a></h3>
<p>Any new <code class="docutils literal"><span class="pre">test_*.py</span></code> unit test added under <code class="docutils literal"><span class="pre">python/paddle/v2/framework/tests</span></code> is automatically picked up by the build.</p>
<p>Note that, <strong>unlike building a single op, running the unit tests requires building the whole project</strong>, with <code class="docutils literal"><span class="pre">WITH_TESTING</span></code> enabled, i.e. <code class="docutils literal"><span class="pre">cmake</span> <span class="pre">paddle_dir</span> <span class="pre">-DWITH_TESTING=ON</span></code>. After a successful build, run the unit test with:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>make <span class="nb">test</span> <span class="nv">ARGS</span><span class="o">=</span><span class="s2">&quot;-R test_mul_op -V&quot;</span>
</pre></div>
</div>
<p>Or:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>ctest -R test_mul_op
</pre></div>
</div>
</div>
</div>
<div class="section" id="">
<span id="id9"></span><h2>Notes<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<ul class="simple">
<li>Create separate <code class="docutils literal"><span class="pre">*_op.h</span></code> (if needed), <code class="docutils literal"><span class="pre">*_op.cc</span></code> and <code class="docutils literal"><span class="pre">*_op.cu</span></code> (if needed) files for each Op; putting several Ops in one file leads to build errors.</li>
<li>The type name used when registering an Op must match the Op&#8217;s name; registering, say, <code class="docutils literal"><span class="pre">REGISTER_OP(B,</span> <span class="pre">...)</span></code> inside <code class="docutils literal"><span class="pre">A_op.cc</span></code> leads to unit test failures.</li>
<li>If an Op has no GPU kernel, do not create an empty <code class="docutils literal"><span class="pre">*_op.cu</span></code>; that also leads to unit test failures.</li>
<li>Functions shared by several Ops should be placed in files that do not follow the <code class="docutils literal"><span class="pre">*_op.*</span></code> pattern, such as <code class="docutils literal"><span class="pre">gather.h</span></code>.</li>
</ul>
</ul>
</div>
</div>


           </div>
          </div>
          <footer>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true,
            SOURCELINK_SUFFIX: ".txt",
        };
    </script>
      <script type="text/javascript" src="../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../_static/doctools.js"></script>
      <script type="text/javascript" src="../../_static/translations.js"></script>
      <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script>
       
  

  
  
    <script type="text/javascript" src="../../_static/js/theme.js"></script>
  
  
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script>
  <script src="../../_static/js/paddle_doc_init.js"></script> 

</body>
</html>