

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>如何写新的Operator &mdash; PaddlePaddle  文档</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  

  
  
        <link rel="index" title="索引"
              href="../../genindex.html"/>
        <link rel="search" title="搜索" href="../../search.html"/>
    <link rel="top" title="PaddlePaddle  文档" href="../../index.html"/> 

  <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/css/override.css" type="text/css" />
  <script>
  var _hmt = _hmt || [];
  (function() {
    var hm = document.createElement("script");
    hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
    var s = document.getElementsByTagName("script")[0]; 
    s.parentNode.insertBefore(hm, s);
  })();
  </script>

  

  
  <script src="../../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  
  <header class="site-header">
    <div class="site-logo">
      <a href="/"><img src="../../_static/images/PP_w.png"></a>
    </div>
    <div class="site-nav-links">
      <div class="site-menu">
        <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a>
        <div class="language-switcher dropdown">
          <a type="button" data-toggle="dropdown">
            <span>English</span>
            <i class="fa fa-angle-up"></i>
            <i class="fa fa-angle-down"></i>
          </a>
          <ul class="dropdown-menu">
            <li><a href="/doc_cn">中文</a></li>
            <li><a href="/doc">English</a></li>
          </ul>
        </div>
        <ul class="site-page-links">
          <li><a href="/">Home</a></li>
        </ul>
      </div>
      <div class="doc-module">
        
        <ul>
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a></li>
<li class="toctree-l1"><a class="reference internal" href="../index_cn.html">进阶指南</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../api/index_cn.html">API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../mobile/index_cn.html">MOBILE</a></li>
</ul>

        
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>        
      </div>
    </div>
  </header>
  
  <div class="main-content-wrap">

    
    <nav class="doc-menu-vertical" role="navigation">
        
          
          <ul>
<li class="toctree-l1"><a class="reference internal" href="../../getstarted/index_cn.html">新手入门</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/build_and_install/index_cn.html">安装与编译</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../getstarted/build_and_install/pip_install_cn.html">使用pip安装</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../getstarted/build_and_install/docker_install_cn.html">使用Docker安装运行</a></li>
<li class="toctree-l3"><a class="reference internal" href="build_cn.html">用Docker编译和测试PaddlePaddle</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../getstarted/build_and_install/build_from_source_cn.html">从源码编译</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../index_cn.html">进阶指南</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../usage/cmd_parameter/index_cn.html">设置命令行参数</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/use_case_cn.html">使用案例</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/arguments_cn.html">参数概述</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cmd_parameter/detail_introduction_cn.html">细节描述</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../usage/cluster/cluster_train_cn.html">分布式训练</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../usage/cluster/fabric_cn.html">fabric集群</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cluster/openmpi_cn.html">openmpi集群</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cluster/k8s_cn.html">kubernetes单机</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cluster/k8s_distributed_cn.html">kubernetes distributed分布式</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/cluster/k8s_aws_cn.html">AWS上运行kubernetes集群训练</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../usage/capi/index_cn.html">PaddlePaddle C-API</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../usage/capi/compile_paddle_lib_cn.html">编译 PaddlePaddle 预测库</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/capi/organization_of_the_inputs_cn.html">输入/输出数据组织</a></li>
<li class="toctree-l3"><a class="reference internal" href="../usage/capi/workflow_of_capi_cn.html">C-API 使用流程</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="contribute_to_paddle_cn.html">如何贡献代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="write_docs_cn.html">如何贡献/修改文档</a></li>
<li class="toctree-l2"><a class="reference internal" href="../deep_model/rnn/index_cn.html">RNN相关模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/rnn_config_cn.html">RNN配置</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/recurrent_group_cn.html">Recurrent Group教程</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deep_model/rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../optimization/gpu_profiling_cn.html">GPU性能分析与调优</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../api/index_cn.html">API</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/model_configs.html">模型配置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/activation.html">Activation</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/layer.html">Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/evaluators.html">Evaluators</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/optimizer.html">Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/pooling.html">Pooling</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/networks.html">Networks</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/config/attr.html">Parameter Attribute</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/data.html">数据访问</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/data/data_reader.html">Data Reader Interface</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/data/image.html">Image Interface</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/data/dataset.html">Dataset</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/run_logic.html">训练与应用</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../api/v2/fluid.html">Fluid</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/layers.html">Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/data_feeder.html">DataFeeder</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/executor.html">Executor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/initializer.html">Initializer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/evaluator.html">Evaluator</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/nets.html">Nets</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/optimizer.html">Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/param_attr.html">ParamAttr</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/profiler.html">Profiler</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/regularizer.html">Regularizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../api/v2/fluid/io.html">IO</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../faq/index_cn.html">FAQ</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../faq/build_and_install/index_cn.html">编译安装与单元测试</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/model/index_cn.html">模型配置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/parameter/index_cn.html">参数设置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/local/index_cn.html">本地训练与预测</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq/cluster/index_cn.html">集群训练与预测</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../mobile/index_cn.html">MOBILE</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../mobile/cross_compiling_for_android_cn.html">Android平台编译指南</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../mobile/cross_compiling_for_ios_cn.html">iOS平台编译指南</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../mobile/cross_compiling_for_raspberry_cn.html">Raspberry Pi平台编译指南</a></li>
</ul>
</li>
</ul>

        
    </nav>
    
    <section class="doc-content-wrap">

      

 







<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
      
    <li>如何写新的Operator</li>
  </ul>
</div>
      
      <div class="wy-nav-content" id="doc-content">
        <div class="rst-content">
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="operator">
<span id="operator"></span><h1>如何写新的Operator<a class="headerlink" href="#operator" title="永久链接至标题"></a></h1>
<ul class="simple">
<li><a class="reference external" href="#概念简介">概念简介</a></li>
<li><a class="reference external" href="#实现c类">实现C++类</a><ul>
<li><a class="reference external" href="#定义protomaker类">定义ProtoMaker类</a></li>
<li><a class="reference external" href="#定义operator类">定义Operator类</a></li>
<li><a class="reference external" href="#定义opkernel类">定义OpKernel类</a></li>
<li><a class="reference external" href="#注册operator">注册Operator</a></li>
<li><a class="reference external" href="#编译">编译</a></li>
</ul>
</li>
<li><a class="reference external" href="#绑定python">绑定Python</a></li>
<li><a class="reference external" href="#实现单元测试">实现单元测试</a><ul>
<li><a class="reference external" href="#前向operator单测">前向Operator单测</a></li>
<li><a class="reference external" href="#反向operator单测">反向Operator单测</a></li>
<li><a class="reference external" href="#编译和执行">编译和执行</a></li>
</ul>
</li>
<li><a class="reference external" href="#注意事项">注意事项</a></li>
</ul>
<div class="section" id="">
<span id="id1"></span><h2>概念简介<a class="headerlink" href="#" title="永久链接至标题"></a></h2>
<p>下面简单介绍需要用到的基类,详细介绍请参考设计文档。</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OperatorBase</span></code>: Operator(简写,Op)基类。</li>
<li><code class="docutils literal"><span class="pre">framework::OpKernel</span></code>: Op计算函数的基类,称作Kernel。</li>
<li><code class="docutils literal"><span class="pre">framework::OperatorWithKernel</span></code>:继承自OperatorBase,Op有计算函数,称作有Kernel。</li>
<li><code class="docutils literal"><span class="pre">class</span> <span class="pre">OpProtoAndCheckerMaker</span></code>:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成</li>
</ul>
<p>依据是否包含Kernel,可以将Op分为两种:包含Kernel的Op和不包含Kernel的Op。前者的定义继承自<code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>,后者继承自<code class="docutils literal"><span class="pre">OperatorBase</span></code>。本教程主要介绍如何写带Kernel的Op,简单总结Op需要包含的内容如下:</p>
<table border="1" class="docutils">
<colgroup>
<col width="20%" />
<col width="80%" />
</colgroup>
<thead valign="bottom">
<tr class="row-odd"><th class="head">内容</th>
<th class="head">定义位置</th>
</tr>
</thead>
<tbody valign="top">
<tr class="row-even"><td>OpProtoMake定义</td>
<td><code class="docutils literal"><span class="pre">.cc</span></code>文件,Backward Op不需要定义OpProtoMake</td>
</tr>
<tr class="row-odd"><td>Op定义</td>
<td><code class="docutils literal"><span class="pre">.cc</span></code>文件</td>
</tr>
<tr class="row-even"><td>Kernel实现</td>
<td>CPU、CUDA共享的Kernel实现在<code class="docutils literal"><span class="pre">.h</span></code>文件中;否则,CPU实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件中,CUDA实现在<code class="docutils literal"><span class="pre">.cu</span></code>文件中</td>
</tr>
<tr class="row-odd"><td>注册Op</td>
<td>Op注册实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件中;Kernel注册的CPU实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件中,CUDA实现在<code class="docutils literal"><span class="pre">.cu</span></code>文件中</td>
</tr>
</tbody>
</table>
<p>新实现的op都添加至目录<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators">paddle/operators</a>下,文件命名以<code class="docutils literal"><span class="pre">*_op.h</span></code>(如有)、<code class="docutils literal"><span class="pre">*_op.cc</span></code>和<code class="docutils literal"><span class="pre">*_op.cu</span></code>(如有)结尾。<strong>系统会根据文件名自动构建op和其对应的Python扩展。</strong></p>
<p>下面以矩阵乘操作,即<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc">MulOp</a>为例来介绍如何写带Kernel的Operator。</p>
</div>
<div class="section" id="c">
<span id="c"></span><h2>实现C++类<a class="headerlink" href="#c" title="永久链接至标题"></a></h2>
<div class="section" id="protomaker">
<span id="protomaker"></span><h3>定义ProtoMaker类<a class="headerlink" href="#protomaker" title="永久链接至标题"></a></h3>
<p>矩阵乘法的公式:$Out = X * Y$,可见该计算由两个输入、一个输出组成。</p>
<p>首先定义<code class="docutils literal"><span class="pre">ProtoMaker</span></code>来描述该Op的输入、输出,并添加注释:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">MulOpMaker</span><span class="p">(</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (M x K)&quot;</span><span class="p">);</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (K x N)&quot;</span><span class="p">);</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (M x N)&quot;</span><span class="p">);</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s"></span>
<span class="s">Two Element Mul Operator.</span>
<span class="s">The equation is: Out = X * Y</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43"><code class="docutils literal"><span class="pre">MulOpMaker</span></code></a>继承自<code class="docutils literal"><span class="pre">framework::OpProtoAndCheckerMaker</span></code>,构造函数含有2个参数:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OpProto</span></code> : 前者存储Op的输入输出和参数属性,将用于Python API接口的生成。</li>
<li><code class="docutils literal"><span class="pre">framework::OpAttrChecker</span></code> :后者用于检查参数属性的合法性。</li>
</ul>
<p>构造函数里通过<code class="docutils literal"><span class="pre">AddInput</span></code>添加输入参数,通过<code class="docutils literal"><span class="pre">AddOutput</span></code>添加输出参数,通过<code class="docutils literal"><span class="pre">AddComment</span></code>添加Op的注释。这些函数会将对应内容添加到<code class="docutils literal"><span class="pre">OpProto</span></code>中。</p>
<p>上面的代码在<code class="docutils literal"><span class="pre">MulOp</span></code>中添加两个输入<code class="docutils literal"><span class="pre">X</span></code><code class="docutils literal"><span class="pre">Y</span></code>,添加了一个输出<code class="docutils literal"><span class="pre">Out</span></code>,并解释了各自含义,命名请遵守<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/name_convention.md">命名规范</a></p>
<p>再以<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37"><code class="docutils literal"><span class="pre">ScaleOp</span></code></a>为例:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">AttrType</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">ScaleOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">ScaleOpMaker</span><span class="p">(</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;The input tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;The output tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s">Scale operator</span>
<span class="s">The equation is: Out = scale*X</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
    <span class="n">AddAttr</span><span class="o">&lt;</span><span class="n">AttrType</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;scale&quot;</span><span class="p">,</span> <span class="s">&quot;scale of scale operator.&quot;</span><span class="p">).</span><span class="n">SetDefault</span><span class="p">(</span><span class="mf">1.0</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>这个例子有两处不同:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">AddInput(&quot;X&quot;,&quot;...&quot;).NotInGradient()</span></code> : 表示<code class="docutils literal"><span class="pre">X</span></code>这个输入不参与<code class="docutils literal"><span class="pre">ScaleOp</span></code>对应的梯度Op的计算。如果Op的某个输入不参与反向梯度的计算,请显式地调用<code class="docutils literal"><span class="pre">.NotInGradient()</span></code>进行设置。</li>
<li><code class="docutils literal"><span class="pre">AddAttr&lt;AttrType&gt;(&quot;scale&quot;,</span> <span class="pre">&quot;...&quot;).SetDefault(1.0);</span></code> : 增加<code class="docutils literal"><span class="pre">scale</span></code>系数,作为参数属性,并且设置默认值为1.0。更多属性定义的写法可参考下方示意。</li>
</ul>
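<p>下面再给出一个假设的<code class="docutils literal"><span class="pre">ProtoMaker</span></code>示意,演示为Op同时添加多个带默认值的属性。其中的Op名称、属性名和注释均为举例,并非对代码库中已有实现的引用:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// 仅为示意:一个假设的 ClipOpMaker,演示 AddAttr 搭配 SetDefault 的常见写法
template &lt;typename AttrType&gt;
class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ClipOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(Tensor), the input of clip operator.");
    AddOutput("Out", "(Tensor), the output of clip operator.");
    AddAttr&lt;AttrType&gt;("min", "Minimum value, elements below it are clipped.")
        .SetDefault(static_cast&lt;AttrType&gt;(0));
    AddAttr&lt;AttrType&gt;("max", "Maximum value, elements above it are clipped.")
        .SetDefault(static_cast&lt;AttrType&gt;(1));
    AddComment(R"DOC(
Clip operator limits the input within the interval [min, max].
)DOC");
  }
};
</pre></div>
</div>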
</div>
<div class="section" id="operator">
<span id="id2"></span><h3>定义Operator类<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
<p>下面的代码实现了MulOp的定义:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOp</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>

 <span class="k">protected</span><span class="o">:</span>
  <span class="kt">void</span> <span class="n">InferShape</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">InferShapeContext</span> <span class="o">&amp;</span><span class="n">ctx</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
    <span class="k">auto</span> <span class="n">dim0</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="k">auto</span> <span class="n">dim1</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim0</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input X(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim1</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input Y(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span>
        <span class="n">dim0</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
        <span class="s">&quot;First matrix&#39;s width must be equal with second matrix&#39;s height.&quot;</span><span class="p">);</span>
    <span class="n">ctx</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">Resize</span><span class="p">({</span><span class="n">dim0</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">1</span><span class="p">]});</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22"><code class="docutils literal"><span class="pre">MulOp</span></code></a>继承自<code class="docutils literal"><span class="pre">OperatorWithKernel</span></code><code class="docutils literal"><span class="pre">public</span></code>成员:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>
</pre></div>
</div>
<p>这句表示使用基类<code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>的构造函数,也可写成:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="n">MulOp</span><span class="p">(</span><span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span> <span class="o">&amp;</span><span class="n">type</span><span class="p">,</span> <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">inputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">outputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">AttributeMap</span> <span class="o">&amp;</span><span class="n">attrs</span><span class="p">)</span>
  <span class="o">:</span> <span class="n">OperatorWithKernel</span><span class="p">(</span><span class="n">type</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">outputs</span><span class="p">,</span> <span class="n">attrs</span><span class="p">)</span> <span class="p">{}</span>
</pre></div>
</div>
<p>还需要重写<code class="docutils literal"><span class="pre">InferShape</span></code>接口。<code class="docutils literal"><span class="pre">InferShape</span></code>为const函数,不能修改Op的成员变量,参数为<code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::InferShapeContext</span> <span class="pre">&amp;ctx</span></code>,通过该参数可获取到输入输出以及属性。它的功能是:</p>
<ul class="simple">
<li>1). 做检查, 尽早报错:检查输入数据维度、类型等是否合法。</li>
<li>2). 设置输出Tensor的形状。</li>
</ul>
<p>通常<code class="docutils literal"><span class="pre">OpProtoMaker</span></code>和<code class="docutils literal"><span class="pre">Op</span></code>类的定义写在<code class="docutils literal"><span class="pre">.cc</span></code>文件中,和下面将要介绍的注册函数一起放在<code class="docutils literal"><span class="pre">.cc</span></code>文件中。</p>
</div>
<div class="section" id="opkernel">
<span id="opkernel"></span><h3>定义OpKernel类<a class="headerlink" href="#opkernel" title="永久链接至标题"></a></h3>
<p><code class="docutils literal"><span class="pre">MulKernel</span></code>继承自<code class="docutils literal"><span class="pre">framework::OpKernel</span></code>,带有下面两个模板参数:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">DeviceContext</span></code>: 表示设备类型,不同设备(CPU、CUDA)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a></li>
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">T</span></code> : 表示数据类型,如<code class="docutils literal"><span class="pre">float</span></code>, <code class="docutils literal"><span class="pre">double</span></code>等。</li>
</ul>
<p>需要为<code class="docutils literal"><span class="pre">MulKernel</span></code>类重写<code class="docutils literal"><span class="pre">Compute</span></code>接口。</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">Compute</span></code>接受一个输入参数:<code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::ExecutionContext&amp;</span> <span class="pre">context</span></code></li>
<li><code class="docutils literal"><span class="pre">InferShapeContext</span></code>相比,<code class="docutils literal"><span class="pre">ExecutionContext</span></code>增加了设备类型,同样可获取到输入输出和属性参数。</li>
<li><code class="docutils literal"><span class="pre">Compute</span></code>函数里实现<code class="docutils literal"><span class="pre">OpKernel</span></code>的具体计算逻辑。</li>
</ul>
<p>下面是<code class="docutils literal"><span class="pre">MulKernel</span></code>中<code class="docutils literal"><span class="pre">Compute</span></code>接口的实现:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">DeviceContext</span><span class="p">,</span> <span class="k">typename</span> <span class="n">T</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">MulKernel</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpKernel</span> <span class="p">{</span>
<span class="k">public</span><span class="o">:</span>
<span class="kt">void</span> <span class="n">Compute</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">ExecutionContext</span><span class="o">&amp;</span> <span class="n">context</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">X</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Y</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Z</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">);</span>
  <span class="n">Z</span><span class="o">-&gt;</span><span class="n">mutable_data</span><span class="o">&lt;</span><span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="n">context</span><span class="p">.</span><span class="n">GetPlace</span><span class="p">());</span>
  <span class="k">auto</span><span class="o">&amp;</span> <span class="n">device_context</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="k">template</span> <span class="n">device_context</span><span class="o">&lt;</span><span class="n">DeviceContext</span><span class="o">&gt;</span><span class="p">();</span>
  <span class="n">math</span><span class="o">::</span><span class="n">matmul</span><span class="o">&lt;</span><span class="n">DeviceContext</span><span class="p">,</span> <span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="o">*</span><span class="n">X</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="o">*</span><span class="n">Y</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Z</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">device_context</span><span class="p">);</span>
<span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>需要注意:<strong>不同设备(CPU、CUDA)共享一个Op定义,是否共享同一个<code class="docutils literal"><span class="pre">OpKernel</span></code>,取决于<code class="docutils literal"><span class="pre">Compute</span></code>调用的函数是否支持不同设备。</strong></p>
<p><code class="docutils literal"><span class="pre">MulOp</span></code>的CPU、CUDA实现共享同一个<code class="docutils literal"><span class="pre">Kernel</span></code>;<code class="docutils literal"><span class="pre">OpKernel</span></code>不共享的例子可以参考:<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a>。</p>
<p>为了使<code class="docutils literal"><span class="pre">OpKernel</span></code>的计算过程书写更加简单,并且CPU、CUDA的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现<code class="docutils literal"><span class="pre">Compute</span></code>接口。关于在PaddlePaddle中如何使用Eigen库,请参考<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md">使用文档</a></p>
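<p>下面给出一个借助Eigen实现<code class="docutils literal"><span class="pre">Compute</span></code>的简化示意。这里以一个假设的逐元素缩放Kernel为例,写法参照<code class="docutils literal"><span class="pre">ScaleOp</span></code>的实现思路,具体接口请以代码库中的实际代码为准:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// 仅为示意:借助 Eigen 实现逐元素计算,CPU、CUDA 可共享同一份 Kernel 代码
template &lt;typename DeviceContext, typename T&gt;
class ScaleKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext&amp; context) const override {
    auto* in = context.Input&lt;framework::Tensor&gt;("X");
    auto* out = context.Output&lt;framework::Tensor&gt;("Out");
    out-&gt;mutable_data&lt;T&gt;(context.GetPlace());

    auto scale = static_cast&lt;T&gt;(context.Attr&lt;float&gt;("scale"));
    auto x = framework::EigenVector&lt;T&gt;::Flatten(*in);
    auto y = framework::EigenVector&lt;T&gt;::Flatten(*out);
    // eigen_device() 在CPU下对应 Eigen::DefaultDevice,在CUDA下对应 Eigen::GpuDevice
    auto&amp; place =
        *context.template device_context&lt;DeviceContext&gt;().eigen_device();
    y.device(place) = scale * x;
  }
};
</pre></div>
</div>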
<p>到此,前向Op实现完成。接下来,需要在<code class="docutils literal"><span class="pre">.cc</span></code>文件中注册该op和kernel。
反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。<strong>但需注意反向Op没有<code class="docutils literal"><span class="pre">ProtoMaker</span></code></strong></p>
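<p>作为参考,下面给出一个反向Op类的简化示意,仅用于说明结构,细节请以<code class="docutils literal"><span class="pre">mul_op.cc</span></code>中的实际实现为准:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// 仅为示意:反向Op同样继承 OperatorWithKernel,但不需要定义 ProtoMaker
class MulOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &amp;ctx) const override {
    auto x_dims = ctx.Input&lt;Tensor&gt;("X")-&gt;dims();
    auto y_dims = ctx.Input&lt;Tensor&gt;("Y")-&gt;dims();
    auto *x_grad = ctx.Output&lt;Tensor&gt;(framework::GradVarName("X"));
    auto *y_grad = ctx.Output&lt;Tensor&gt;(framework::GradVarName("Y"));
    // 输入的梯度Tensor与对应的前向输入形状一致
    if (x_grad) x_grad-&gt;Resize(x_dims);
    if (y_grad) y_grad-&gt;Resize(y_dims);
  }
};
</pre></div>
</div>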
</div>
<div class="section" id="operator">
<span id="id3"></span><h3>注册Operator<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
<ul>
<li><p class="first"><code class="docutils literal"><span class="pre">.cc</span></code>文件中注册前向、反向Op类,注册CPU Kernel。</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOp</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpMaker</span><span class="p">,</span> <span class="n">mul_grad</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpGrad</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUDeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
              <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUDeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
<p>在上面的代码中:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">REGISTER_OP</span></code> : 注册<code class="docutils literal"><span class="pre">ops::MulOp</span></code>类,类型名为<code class="docutils literal"><span class="pre">mul</span></code>,该类的<code class="docutils literal"><span class="pre">ProtoMaker</span></code><code class="docutils literal"><span class="pre">ops::MulOpMaker</span></code>,注册<code class="docutils literal"><span class="pre">ops::MulOpGrad</span></code>,类型名为<code class="docutils literal"><span class="pre">mul_grad</span></code></li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_WITHOUT_GRADIENT</span></code> : 用于注册没有反向的Op。</li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_CPU_KERNEL</span></code> :注册<code class="docutils literal"><span class="pre">ops::MulKernel</span></code>类,并特化模板参数为<code class="docutils literal"><span class="pre">paddle::platform::CPUPlace</span></code><code class="docutils literal"><span class="pre">float</span></code>类型,同理,注册<code class="docutils literal"><span class="pre">ops::MulGradKernel</span></code>类。</li>
</ul>
</li>
</ul>
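<p>对于没有反向的Op,可以参考下面的注册示意。其中<code class="docutils literal"><span class="pre">my_op</span></code>、<code class="docutils literal"><span class="pre">MyOp</span></code>、<code class="docutils literal"><span class="pre">MyOpMaker</span></code>和<code class="docutils literal"><span class="pre">MyOpKernel</span></code>均为假设的名字,仅用于说明宏的用法:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// 仅为示意:没有反向的Op使用 REGISTER_OP_WITHOUT_GRADIENT 注册,
// 只需给出Op类和对应的 ProtoMaker,不需要注册梯度Op
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(my_op, ops::MyOp, ops::MyOpMaker);
REGISTER_OP_CPU_KERNEL(
    my_op, ops::MyOpKernel&lt;paddle::platform::CPUDeviceContext, float&gt;);
</pre></div>
</div>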
<ul>
<li><p class="first"><code class="docutils literal"><span class="pre">.cu</span></code>文件中注册CUDA Kernel。</p>
<ul class="simple">
<li>请注意,如果CUDA Kernel的实现基于Eigen unsupported模块,那么在 <code class="docutils literal"><span class="pre">.cu</span></code>的开始请加上宏定义 <code class="docutils literal"><span class="pre">#define</span> <span class="pre">EIGEN_USE_GPU</span></code>,代码示例如下:</li>
</ul>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="c1">// if use Eigen unsupported module before include head files</span>
<span class="cp">#define EIGEN_USE_GPU</span>

<span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP_CUDA_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CUDADeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_CUDA_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
                       <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CUDADeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
</li>
</ul>
</div>
<div class="section" id="">
<span id="id4"></span><h3>编译<a class="headerlink" href="#" title="永久链接至标题"></a></h3>
<p>运行下面命令可以进行编译:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">make</span> <span class="n">mul_op</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="python">
<span id="python"></span><h2>绑定Python<a class="headerlink" href="#python" title="永久链接至标题"></a></h2>
<p>系统会对新增的op自动绑定Python,并链接到生成的lib库中。</p>
</div>
<div class="section" id="">
<span id="id5"></span><h2>实现单元测试<a class="headerlink" href="#" title="永久链接至标题"></a></h2>
<p>单元测试包括对比前向Op不同设备(CPU、CUDA)的实现、对比反向Op不同设备(CPU、CUDA)的实现,以及反向Op的梯度测试。下面介绍<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py"><code class="docutils literal"><span class="pre">MulOp</span></code>的单元测试</a>。</p>
<div class="section" id="operator">
<span id="id6"></span><h3>前向Operator单测<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
<p>Op单元测试继承自<code class="docutils literal"><span class="pre">OpTest</span></code>。各项更加具体的单元测试在<code class="docutils literal"><span class="pre">TestMulOp</span></code>里完成。测试Operator,需要:</p>
<ol class="simple">
<li><code class="docutils literal"><span class="pre">setUp</span></code>函数定义输入、输出,以及相关的属性参数。</li>
<li>生成随机的输入数据。</li>
<li>在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。</li>
<li>反向计算已经自动集成进测试框架,直接调用相应接口即可。</li>
</ol>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">unittest</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">op_test</span> <span class="kn">import</span> <span class="n">OpTest</span>


<span class="k">class</span> <span class="nc">TestMulOp</span><span class="p">(</span><span class="n">OpTest</span><span class="p">):</span>
    <span class="k">def</span> <span class="nf">setUp</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">op_type</span> <span class="o">=</span> <span class="s2">&quot;mul&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span> <span class="o">=</span> <span class="p">{</span>
            <span class="s1">&#39;X&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">32</span><span class="p">,</span> <span class="mi">84</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">),</span>
            <span class="s1">&#39;Y&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">84</span><span class="p">,</span> <span class="mi">100</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>
        <span class="p">}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">outputs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;Out&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;X&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;Y&#39;</span><span class="p">])}</span>

    <span class="k">def</span> <span class="nf">test_check_output</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_output</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">test_check_grad_normal</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">([</span><span class="s1">&#39;X&#39;</span><span class="p">,</span> <span class="s1">&#39;Y&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">test_check_grad_ingore_x</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="p">[</span><span class="s1">&#39;Y&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span> <span class="n">no_grad_set</span><span class="o">=</span><span class="nb">set</span><span class="p">(</span><span class="s2">&quot;X&quot;</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">test_check_grad_ingore_y</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="p">[</span><span class="s1">&#39;X&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span> <span class="n">no_grad_set</span><span class="o">=</span><span class="nb">set</span><span class="p">(</span><span class="s1">&#39;Y&#39;</span><span class="p">))</span>
</pre></div>
</div>
<p>上面的代码首先导入依赖的包,下面是对<code class="docutils literal"><span class="pre">setUp</span></code>函数中操作的重要变量的详细解释:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">self.op_type</span> <span class="pre">=</span> <span class="pre">&quot;mul&quot;</span></code> : 定义类型,与operator注册时注册的类型一致。</li>
<li><code class="docutils literal"><span class="pre">self.inputs</span></code> : 定义输入,类型为<code class="docutils literal"><span class="pre">numpy.array</span></code>,并初始化。</li>
<li><code class="docutils literal"><span class="pre">self.outputs</span></code> : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。</li>
</ul>
</div>
<div class="section" id="operator">
<span id="id7"></span><h3>反向operator单测<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
<p>而反向测试中:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">test_check_grad_normal</span></code>中调用<code class="docutils literal"><span class="pre">check_grad</span></code>使用数值法检测梯度正确性和稳定性。<ul>
<li>第一个参数<code class="docutils literal"><span class="pre">[&quot;X&quot;,</span> <span class="pre">&quot;Y&quot;]</span></code> : 指定对输入变量<code class="docutils literal"><span class="pre">X</span></code><code class="docutils literal"><span class="pre">Y</span></code>做梯度检测。</li>
<li>第二个参数<code class="docutils literal"><span class="pre">&quot;Out&quot;</span></code> : 指定前向网络最终的输出目标变量<code class="docutils literal"><span class="pre">Out</span></code></li>
<li>第三个参数<code class="docutils literal"><span class="pre">max_relative_error</span></code>:指定检测梯度时能容忍的最大错误值。</li>
</ul>
</li>
<li><code class="docutils literal"><span class="pre">test_check_grad_ingore_x</span></code><code class="docutils literal"><span class="pre">test_check_grad_ingore_y</span></code>分支用来测试只需要计算一个输入梯度的情况。</li>
</ul>
</div>
<div class="section" id="">
<span id="id8"></span><h3>编译和执行<a class="headerlink" href="#" title="永久链接至标题"></a></h3>
<p><code class="docutils literal"><span class="pre">python/paddle/v2/framework/tests</span></code> 目录下新增的 <code class="docutils literal"><span class="pre">test_*.py</span></code> 单元测试会被自动加入工程进行编译。</p>
<p>请注意,<strong>不同于Op的编译测试,运行单元测试时需要编译整个工程</strong>,并且编译时需要打开<code class="docutils literal"><span class="pre">WITH_TESTING</span></code>,即<code class="docutils literal"><span class="pre">cmake</span> <span class="pre">paddle_dir</span> <span class="pre">-DWITH_TESTING=ON</span></code>。编译成功后,执行下面的命令来运行单元测试:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>make <span class="nb">test</span> <span class="nv">ARGS</span><span class="o">=</span><span class="s2">&quot;-R test_mul_op -V&quot;</span>
</pre></div>
</div>
<p>或者:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>ctest -R test_mul_op
</pre></div>
</div>
</div>
</div>
<div class="section" id="">
<span id="id9"></span><h2>注意事项<a class="headerlink" href="#" title="永久链接至标题"></a></h2>
<ul class="simple">
<li>为每个Op创建单独的<code class="docutils literal"><span class="pre">*_op.h</span></code>(如有)、<code class="docutils literal"><span class="pre">*_op.cc</span></code><code class="docutils literal"><span class="pre">*_op.cu</span></code>(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。</li>
<li>注册Op时的类型名,需要和该Op的名字一样。即不允许在<code class="docutils literal"><span class="pre">A_op.cc</span></code>里面,注册<code class="docutils literal"><span class="pre">REGISTER_OP(B,</span> <span class="pre">...)</span></code>等,这将会导致单元测试出错。</li>
<li>如果Op没有实现CUDA Kernel,请不要创建空的<code class="docutils literal"><span class="pre">*_op.cu</span></code>,这将会导致单元测试出错。</li>
<li>如果多个Op依赖一些共用的函数,可以创建非<code class="docutils literal"><span class="pre">*_op.*</span></code>格式的文件来存放,如<code class="docutils literal"><span class="pre">gather.h</span></code>文件(组织方式可参考本节下方的示意代码)。</li>
</ul>
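<p>共用函数文件的组织方式可参考下面的示意。文件名<code class="docutils literal"><span class="pre">tensor_check.h</span></code>和其中的函数均为假设,仅说明这类文件不以<code class="docutils literal"><span class="pre">*_op.*</span></code>命名、可被多个Op引用:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// 仅为示意:假设的 paddle/operators/tensor_check.h,
// 存放多个Op共用的辅助函数,不以 *_op.* 命名,因此不会被当作独立的Op构建
#pragma once
#include "paddle/framework/tensor.h"

namespace paddle {
namespace operators {

// 假设的共用辅助函数:判断两个张量的第一维(batch)是否相同
inline bool SameBatchSize(const framework::Tensor &amp;a,
                          const framework::Tensor &amp;b) {
  return a.dims()[0] == b.dims()[0];
}

}  // namespace operators
}  // namespace paddle
</pre></div>
</div>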
</div>
</div>


           </div>
          </div>
          <footer>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true,
            SOURCELINK_SUFFIX: ".txt",
        };
    </script>
      <script type="text/javascript" src="../../_static/jquery.js"></script>
      <script type="text/javascript" src="../../_static/underscore.js"></script>
      <script type="text/javascript" src="../../_static/doctools.js"></script>
      <script type="text/javascript" src="../../_static/translations.js"></script>
      <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script>
       
  

  
  
    <script type="text/javascript" src="../../_static/js/theme.js"></script>
  
  
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script>
  <script src="../../_static/js/paddle_doc_init.js"></script> 

</body>
</html>