

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>How to Write a New Operator &mdash; PaddlePaddle Documentation</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  

  
  
        <link rel="index" title="索引"
              href="../genindex.html"/>
        <link rel="search" title="搜索" href="../search.html"/>
    <link rel="top" title="PaddlePaddle  文档" href="../index.html"/> 

  <link rel="stylesheet" href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/override.css" type="text/css" />
  <script>
  var _hmt = _hmt || [];
  (function() {
    var hm = document.createElement("script");
    hm.src = "//hm.baidu.com/hm.js?b9a314ab40d04d805655aab1deee08ba";
    var s = document.getElementsByTagName("script")[0]; 
    s.parentNode.insertBefore(hm, s);
  })();
  </script>

  

  
  <script src="../_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  
  <header class="site-header">
    <div class="site-logo">
      <a href="/"><img src="../_static/images/PP_w.png"></a>
    </div>
    <div class="site-nav-links">
      <div class="site-menu">
        <a class="fork-on-github" href="https://github.com/PaddlePaddle/Paddle" target="_blank"><i class="fa fa-github"></i>Fork me on Github</a>
        <div class="language-switcher dropdown">
          <a type="button" data-toggle="dropdown">
            <span>English</span>
            <i class="fa fa-angle-up"></i>
            <i class="fa fa-angle-down"></i>
          </a>
          <ul class="dropdown-menu">
            <li><a href="/doc_cn">中文</a></li>
            <li><a href="/doc">English</a></li>
          </ul>
        </div>
        <ul class="site-page-links">
          <li><a href="/">Home</a></li>
        </ul>
      </div>
      <div class="doc-module">
        
        <ul>
<li class="toctree-l1"><a class="reference internal" href="../getstarted/index_cn.html">新手入门</a></li>
<li class="toctree-l1"><a class="reference internal" href="../build_and_install/index_cn.html">安装与编译</a></li>
<li class="toctree-l1"><a class="reference internal" href="../howto/index_cn.html">进阶使用</a></li>
<li class="toctree-l1"><a class="reference internal" href="index_cn.html">开发标准</a></li>
<li class="toctree-l1"><a class="reference internal" href="../api/index_cn.html">API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../faq/index_cn.html">FAQ</a></li>
</ul>

        
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>        
      </div>
    </div>
  </header>
  
  <div class="main-content-wrap">

    
    <nav class="doc-menu-vertical" role="navigation">
        
          
          <ul>
<li class="toctree-l1"><a class="reference internal" href="../getstarted/index_cn.html">新手入门</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../getstarted/quickstart_cn.html">快速开始</a></li>
<li class="toctree-l2"><a class="reference internal" href="../getstarted/concepts/use_concepts_cn.html">基本使用概念</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../build_and_install/index_cn.html">安装与编译</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../build_and_install/pip_install_cn.html">使用pip安装</a></li>
<li class="toctree-l2"><a class="reference internal" href="../build_and_install/docker_install_cn.html">使用Docker安装运行</a></li>
<li class="toctree-l2"><a class="reference internal" href="../build_and_install/build_cn.html">用Docker编译和测试PaddlePaddle</a></li>
<li class="toctree-l2"><a class="reference internal" href="../build_and_install/build_from_source_cn.html">从源码编译</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../howto/index_cn.html">进阶使用</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../howto/cmd_parameter/index_cn.html">命令行参数设置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../howto/cmd_parameter/use_case_cn.html">使用案例</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/cmd_parameter/arguments_cn.html">参数概述</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/cmd_parameter/detail_introduction_cn.html">细节描述</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../howto/cluster/index_cn.html">分布式训练</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../howto/cluster/preparations_cn.html">环境准备</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/cluster/cmd_argument_cn.html">启动参数说明</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/cluster/multi_cluster/index_cn.html">在不同集群中运行</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../howto/cluster/multi_cluster/fabric_cn.html">使用fabric启动集群训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../howto/cluster/multi_cluster/openmpi_cn.html">在OpenMPI集群中提交训练作业</a></li>
<li class="toctree-l4"><a class="reference internal" href="../howto/cluster/multi_cluster/k8s_cn.html">Kubernetes单机训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../howto/cluster/multi_cluster/k8s_distributed_cn.html">Kubernetes分布式训练</a></li>
<li class="toctree-l4"><a class="reference internal" href="../howto/cluster/multi_cluster/k8s_aws_cn.html">Distributed PaddlePaddle Training on AWS with Kubernetes</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../howto/capi/index_cn.html">C-API预测库</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../howto/capi/compile_paddle_lib_cn.html">安装与编译C-API预测库</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/capi/organization_of_the_inputs_cn.html">输入/输出数据组织</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/capi/workflow_of_capi_cn.html">C-API使用流程</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../howto/rnn/index_cn.html">RNN相关模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../howto/rnn/rnn_config_cn.html">RNN配置</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/rnn/recurrent_group_cn.html">Recurrent Group教程</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/rnn/hierarchical_layer_cn.html">支持双层序列作为输入的Layer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../howto/rnn/hrnn_rnn_api_compare_cn.html">单双层RNN API对比介绍</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../howto/optimization/gpu_profiling_cn.html">GPU性能调优</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="index_cn.html">开发标准</a><ul>
<li class="toctree-l2"><a class="reference internal" href="contribute_to_paddle_cn.html">如何贡献代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="write_docs_cn.html">如何贡献文档</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../api/index_cn.html">API</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../api/v2/model_configs.html">模型配置</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/activation.html">Activation</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/layer.html">Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/evaluators.html">Evaluators</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/optimizer.html">Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/pooling.html">Pooling</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/networks.html">Networks</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/config/attr.html">Parameter Attribute</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../api/v2/data.html">数据访问</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/data/data_reader.html">Data Reader Interface</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/data/image.html">Image Interface</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/data/dataset.html">Dataset</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../api/v2/run_logic.html">训练与应用</a></li>
<li class="toctree-l2"><a class="reference internal" href="../api/v2/fluid.html">Fluid</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/layers.html">layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/data_feeder.html">data_feeder</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/executor.html">executor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/initializer.html">initializer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/evaluator.html">evaluator</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/nets.html">nets</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/optimizer.html">optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/param_attr.html">param_attr</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/profiler.html">profiler</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/regularizer.html">regularizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../api/v2/fluid/io.html">io</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../faq/index_cn.html">FAQ</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../faq/build_and_install/index_cn.html">编译安装与单元测试</a></li>
<li class="toctree-l2"><a class="reference internal" href="../faq/model/index_cn.html">模型配置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../faq/parameter/index_cn.html">参数设置</a></li>
<li class="toctree-l2"><a class="reference internal" href="../faq/local/index_cn.html">本地训练与预测</a></li>
<li class="toctree-l2"><a class="reference internal" href="../faq/cluster/index_cn.html">集群训练与预测</a></li>
</ul>
</li>
</ul>

        
    </nav>
    
    <section class="doc-content-wrap">

      

 







<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
      
    <li>How to Write a New Operator</li>
  </ul>
</div>
      
      <div class="wy-nav-content" id="doc-content">
        <div class="rst-content">
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="operator">
<span id="operator"></span><h1>如何写新的Operator<a class="headerlink" href="#operator" title="永久链接至标题"></a></h1>
<ul class="simple">
<li><a class="reference external" href="#概念简介">概念简介</a></li>
239 240 241 242 243
<li><a class="reference external" href="#实现c类">实现C++类</a><ul>
<li><a class="reference external" href="#定义protomaker类">定义ProtoMaker类</a></li>
<li><a class="reference external" href="#定义operator类">定义Operator类</a></li>
<li><a class="reference external" href="#定义opkernel类">定义OpKernel类</a></li>
<li><a class="reference external" href="#注册operator">注册Operator</a></li>
244 245 246
<li><a class="reference external" href="#编译">编译</a></li>
</ul>
</li>
247
<li><a class="reference external" href="#绑定python">绑定Python</a></li>
248
<li><a class="reference external" href="#实现单元测试">实现单元测试</a><ul>
249 250
<li><a class="reference external" href="#前向operator单测">前向Operator单测</a></li>
<li><a class="reference external" href="#反向operator单测">反向Operator单测</a></li>
251
<li><a class="reference external" href="#编译和执行">编译和执行</a></li>
252 253
</ul>
</li>
254
<li><a class="reference external" href="#注意事项">注意事项</a></li>
255 256 257 258 259 260 261 262 263 264
</ul>
<div class="section" id="">
<span id="id1"></span><h2>概念简介<a class="headerlink" href="#" title="永久链接至标题"></a></h2>
<p>简单介绍需要用到基类,详细介绍请参考设计文档。</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OperatorBase</span></code>: Operator(简写,Op)基类。</li>
<li><code class="docutils literal"><span class="pre">framework::OpKernel</span></code>: Op计算函数的基类,称作Kernel。</li>
<li><code class="docutils literal"><span class="pre">framework::OperatorWithKernel</span></code>:继承自OperatorBase,Op有计算函数,称作有Kernel。</li>
<li><code class="docutils literal"><span class="pre">class</span> <span class="pre">OpProtoAndCheckerMaker</span></code>:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成</li>
</ul>
265
<p>依据是否包含kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自<code class="docutils literal"><span class="pre">OperatorWithKernel</span></code>,后者继承自<code class="docutils literal"><span class="pre">OperatorBase</span></code>。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下:</p>
266 267 268
<p>内容            | 定义位置
&#8212;&#8212;&#8212;&#8212;&#8211;  | :&#8212;&#8212;&#8212;&#8212;&#8212;&#8212;&#8212;-
OpProtoMake定义  | <code class="docutils literal"><span class="pre">.cc</span></code>文件,Backward Op不需要定义OpProtoMake
269
Op定义           | <code class="docutils literal"><span class="pre">.cc</span></code>文件
270 271
Kernel实现       | CPU、CUDA共享Kernel实现在<code class="docutils literal"><span class="pre">.h</span></code>文件中,否则,CPU 实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件中,CUDA 实现在<code class="docutils literal"><span class="pre">.cu</span></code>文件中。
注册Op           | Op注册实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件;Kernel注册CPU实现在<code class="docutils literal"><span class="pre">.cc</span></code>文件中,CUDA实现在<code class="docutils literal"><span class="pre">.cu</span></code>文件中</p>
272
<p>New ops are all added under the directory <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators">paddle/operators</a>, with file names ending in <code class="docutils literal"><span class="pre">*_op.h</span></code> (if any), <code class="docutils literal"><span class="pre">*_op.cc</span></code>, and <code class="docutils literal"><span class="pre">*_op.cu</span></code> (if any). <strong>The build system automatically builds the op and its corresponding Python extension based on these file names.</strong></p>
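<p>As a quick orientation, the sketch below summarizes this file layout for a hypothetical operator <code class="docutils literal"><span class="pre">foo</span></code> (the name is purely illustrative):</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// File layout sketch for a hypothetical operator &quot;foo&quot; (illustrative names only):
//   paddle/operators/foo_op.h   - FooKernel, if the CPU and CUDA kernels are shared
//   paddle/operators/foo_op.cc  - FooOp, FooOpMaker, REGISTER_OP, REGISTER_OP_CPU_KERNEL
//   paddle/operators/foo_op.cu  - CUDA kernel registration (only if a CUDA kernel exists)
</pre></div>
</div>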
<p>The rest of this tutorial uses the matrix multiplication operator, <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc">MulOp</a>, as an example to show how to write an Operator with kernels.</p>
</div>
<div class="section" id="c">
<span id="c"></span><h2>实现C++类<a class="headerlink" href="#c" title="永久链接至标题"></a></h2>
<div class="section" id="protomaker">
<span id="protomaker"></span><h3>Defining the ProtoMaker Class<a class="headerlink" href="#protomaker" title="Permalink to this headline"></a></h3>
<p>The formula of matrix multiplication is $Out = X * Y$, so the computation takes two inputs and produces one output.</p>
<p>First, define a <code class="docutils literal"><span class="pre">ProtoMaker</span></code> that describes the Op's inputs and outputs, and add a comment:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">MulOpMaker</span><span class="p">(</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (M x K)&quot;</span><span class="p">);</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (K x N)&quot;</span><span class="p">);</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;(Tensor), 2D tensor of size (M x N)&quot;</span><span class="p">);</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s"></span>
<span class="s">Two Element Mul Operator.</span>
<span class="s">The equation is: Out = X * Y</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43"><code class="docutils literal"><span class="pre">MulOpMaker</span></code></a>继承自<code class="docutils literal"><span class="pre">framework::OpProtoAndCheckerMaker</span></code>,构造函数含有2个参数:</p>
297 298 299 300
<ul class="simple">
<li><code class="docutils literal"><span class="pre">framework::OpProto</span></code> : 前者存储Op的输入输出和参数属性,将用于Python API接口的生成。</li>
<li><code class="docutils literal"><span class="pre">framework::OpAttrChecker</span></code> :后者用于检查参数属性的合法性。</li>
</ul>
301
<p>构造函数里通过<code class="docutils literal"><span class="pre">AddInput</span></code>添加输入参数,通过<code class="docutils literal"><span class="pre">AddOutput</span></code>添加输出参数,通过<code class="docutils literal"><span class="pre">AddComment</span></code>添加Op的注释。这些函数会将对应内容添加到<code class="docutils literal"><span class="pre">OpProto</span></code>中。</p>
302
<p>上面的代码在<code class="docutils literal"><span class="pre">MulOp</span></code>中添加两个输入<code class="docutils literal"><span class="pre">X</span></code><code class="docutils literal"><span class="pre">Y</span></code>,添加了一个输出<code class="docutils literal"><span class="pre">Out</span></code>,并解释了各自含义,命名请遵守<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/name_convention.md">命名规范</a></p>
303
<p>再以<a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37"><code class="docutils literal"><span class="pre">ScaleOp</span></code></a>为例:</p>
304 305 306
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">AttrType</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">ScaleOpMaker</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpProtoAndCheckerMaker</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="n">ScaleOpMaker</span><span class="p">(</span><span class="n">OpProto</span> <span class="o">*</span><span class="n">proto</span><span class="p">,</span> <span class="n">OpAttrChecker</span> <span class="o">*</span><span class="n">op_checker</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">OpProtoAndCheckerMaker</span><span class="p">(</span><span class="n">proto</span><span class="p">,</span> <span class="n">op_checker</span><span class="p">)</span> <span class="p">{</span>
    <span class="n">AddInput</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">,</span> <span class="s">&quot;The input tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddOutput</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">,</span> <span class="s">&quot;The output tensor of scale operator.&quot;</span><span class="p">).</span><span class="n">NotInGradient</span><span class="p">();</span>
    <span class="n">AddComment</span><span class="p">(</span><span class="sa">R</span><span class="s">&quot;</span><span class="dl">DOC(</span><span class="s">Scale operator</span>
<span class="s">The equation is: Out = scale*X</span>
<span class="dl">)DOC</span><span class="s">&quot;</span><span class="p">);</span>
    <span class="n">AddAttr</span><span class="o">&lt;</span><span class="n">AttrType</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;scale&quot;</span><span class="p">,</span> <span class="s">&quot;scale of scale operator.&quot;</span><span class="p">).</span><span class="n">SetDefault</span><span class="p">(</span><span class="mf">1.0</span><span class="p">);</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>This example differs from <code class="docutils literal"><span class="pre">MulOpMaker</span></code> in two ways:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">AddInput(&quot;X&quot;,&quot;...&quot;).NotInGradient()</span></code>: indicates that the input <code class="docutils literal"><span class="pre">X</span></code> does not take part in the computation of the gradient Op of <code class="docutils literal"><span class="pre">ScaleOp</span></code>. If an input of an Op is not involved in the backward gradient computation, please call <code class="docutils literal"><span class="pre">.NotInGradient()</span></code> explicitly.</li>
<li><code class="docutils literal"><span class="pre">AddAttr&lt;AttrType&gt;(&quot;scale&quot;,</span> <span class="pre">&quot;...&quot;).SetDefault(1.0);</span></code>: adds a <code class="docutils literal"><span class="pre">scale</span></code> coefficient as an attribute, with a default value of 1.0.</li>
</ul>
</ul>
</div>
<div class="section" id="operator">
<span id="id2"></span><h3>Defining the Operator Class<a class="headerlink" href="#operator" title="Permalink to this headline"></a></h3>
<p>The following code defines <code class="docutils literal"><span class="pre">MulOp</span></code>:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MulOp</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>

 <span class="k">protected</span><span class="o">:</span>
  <span class="kt">void</span> <span class="n">InferShape</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">InferShapeContext</span> <span class="o">&amp;</span><span class="n">ctx</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
    <span class="k">auto</span> <span class="n">dim0</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="k">auto</span> <span class="n">dim1</span> <span class="o">=</span> <span class="n">ctx</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">dims</span><span class="p">();</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim0</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input X(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span><span class="n">dim1</span><span class="p">.</span><span class="n">size</span><span class="p">(),</span> <span class="mi">2</span><span class="p">,</span>
                      <span class="s">&quot;input Y(%s) should be a tensor with 2 dims, a matrix&quot;</span><span class="p">,</span>
                      <span class="n">ctx</span><span class="p">.</span><span class="n">op_</span><span class="p">.</span><span class="n">Input</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">));</span>
    <span class="n">PADDLE_ENFORCE_EQ</span><span class="p">(</span>
        <span class="n">dim0</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
        <span class="s">&quot;First matrix&#39;s width must be equal with second matrix&#39;s height.&quot;</span><span class="p">);</span>
    <span class="n">ctx</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">)</span><span class="o">-&gt;</span><span class="n">Resize</span><span class="p">({</span><span class="n">dim0</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">dim1</span><span class="p">[</span><span class="mi">1</span><span class="p">]});</span>
  <span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p><a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22"><code class="docutils literal"><span class="pre">MulOp</span></code></a>继承自<code class="docutils literal"><span class="pre">OperatorWithKernel</span></code><code class="docutils literal"><span class="pre">public</span></code>成员:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">using</span> <span class="n">framework</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="o">::</span><span class="n">OperatorWithKernel</span><span class="p">;</span>
</pre></div>
</div>
<p>This line means that the constructor of the base class <code class="docutils literal"><span class="pre">OperatorWithKernel</span></code> is used; it could also be written as:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="n">MulOp</span><span class="p">(</span><span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span> <span class="o">&amp;</span><span class="n">type</span><span class="p">,</span> <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">inputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">VariableNameMap</span> <span class="o">&amp;</span><span class="n">outputs</span><span class="p">,</span>
      <span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">AttributeMap</span> <span class="o">&amp;</span><span class="n">attrs</span><span class="p">)</span>
  <span class="o">:</span> <span class="n">OperatorWithKernel</span><span class="p">(</span><span class="n">type</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">outputs</span><span class="p">,</span> <span class="n">attrs</span><span class="p">)</span> <span class="p">{}</span>
</pre></div>
</div>
<p>The <code class="docutils literal"><span class="pre">InferShape</span></code> interface also needs to be overridden. <code class="docutils literal"><span class="pre">InferShape</span></code> is a const function and must not modify the Op's member variables. Its argument is <code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::InferShapeContext</span> <span class="pre">&amp;ctx</span></code>, through which the inputs, outputs, and attributes can be accessed. Its job is to:</p>
<ul class="simple">
<li>1) Check early and report errors as soon as possible: verify that the dimensions, types, etc. of the input data are valid.</li>
<li>2) Set the shapes of the output tensors.</li>
</ul>
<p>The definitions of <code class="docutils literal"><span class="pre">OpProtoMaker</span></code> and the <code class="docutils literal"><span class="pre">Op</span></code> class are usually written in the <code class="docutils literal"><span class="pre">.cc</span></code> file, together with the registration functions introduced below.</p>
</div>
<div class="section" id="opkernel">
<span id="opkernel"></span><h3>Defining the OpKernel Class<a class="headerlink" href="#opkernel" title="Permalink to this headline"></a></h3>
<p><code class="docutils literal"><span class="pre">MulKernel</span></code> inherits from <code class="docutils literal"><span class="pre">framework::OpKernel</span></code> and has the following two template parameters:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">DeviceContext</span></code>: the device type. Add this template parameter when different devices (CPU, CUDA) share the same kernel; omit it when they do not. An example of a kernel that is not shared is <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a>.</li>
<li><code class="docutils literal"><span class="pre">typename</span> <span class="pre">T</span></code>: the data type, such as <code class="docutils literal"><span class="pre">float</span></code> or <code class="docutils literal"><span class="pre">double</span></code>.</li>
</ul>
<p>The <code class="docutils literal"><span class="pre">Compute</span></code> interface needs to be overridden for the <code class="docutils literal"><span class="pre">MulKernel</span></code> class.</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">Compute</span></code> takes one argument: <code class="docutils literal"><span class="pre">const</span> <span class="pre">framework::ExecutionContext&amp;</span> <span class="pre">context</span></code>.</li>
<li>Compared with <code class="docutils literal"><span class="pre">InferShapeContext</span></code>, <code class="docutils literal"><span class="pre">ExecutionContext</span></code> additionally carries the device type; the inputs, outputs, and attributes can likewise be obtained from it.</li>
<li>The concrete computation logic of the <code class="docutils literal"><span class="pre">OpKernel</span></code> is implemented inside <code class="docutils literal"><span class="pre">Compute</span></code>.</li>
</ul>
<p>Below is the implementation of <code class="docutils literal"><span class="pre">Compute</span></code> for <code class="docutils literal"><span class="pre">MulKernel</span></code>:</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">template</span> <span class="o">&lt;</span><span class="k">typename</span> <span class="n">DeviceContext</span><span class="p">,</span> <span class="k">typename</span> <span class="n">T</span><span class="o">&gt;</span>
<span class="k">class</span> <span class="nc">MulKernel</span> <span class="o">:</span> <span class="k">public</span> <span class="n">framework</span><span class="o">::</span><span class="n">OpKernel</span> <span class="p">{</span>
<span class="k">public</span><span class="o">:</span>
<span class="kt">void</span> <span class="n">Compute</span><span class="p">(</span><span class="k">const</span> <span class="n">framework</span><span class="o">::</span><span class="n">ExecutionContext</span><span class="o">&amp;</span> <span class="n">context</span><span class="p">)</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">X</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;X&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Y</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Input</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Y&quot;</span><span class="p">);</span>
  <span class="k">auto</span><span class="o">*</span> <span class="n">Z</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="n">Output</span><span class="o">&lt;</span><span class="n">Tensor</span><span class="o">&gt;</span><span class="p">(</span><span class="s">&quot;Out&quot;</span><span class="p">);</span>
  <span class="n">Z</span><span class="o">-&gt;</span><span class="n">mutable_data</span><span class="o">&lt;</span><span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="n">context</span><span class="p">.</span><span class="n">GetPlace</span><span class="p">());</span>
  <span class="k">auto</span><span class="o">&amp;</span> <span class="n">device_context</span> <span class="o">=</span> <span class="n">context</span><span class="p">.</span><span class="k">template</span> <span class="n">device_context</span><span class="o">&lt;</span><span class="n">DeviceContext</span><span class="o">&gt;</span><span class="p">();</span>
  <span class="n">math</span><span class="o">::</span><span class="n">matmul</span><span class="o">&lt;</span><span class="n">DeviceContext</span><span class="p">,</span> <span class="n">T</span><span class="o">&gt;</span><span class="p">(</span><span class="o">*</span><span class="n">X</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="o">*</span><span class="n">Y</span><span class="p">,</span> <span class="nb">false</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Z</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">device_context</span><span class="p">);</span>
<span class="p">}</span>
<span class="p">};</span>
</pre></div>
</div>
<p>Note that <strong>different devices (CPU, CUDA) share a single Op definition; whether they can share the same <code class="docutils literal"><span class="pre">OpKernel</span></code> depends on whether the functions called in <code class="docutils literal"><span class="pre">Compute</span></code> support both devices.</strong></p>
<p>The CPU and CUDA implementations of <code class="docutils literal"><span class="pre">MulOp</span></code> share the same <code class="docutils literal"><span class="pre">Kernel</span></code>. For an example where the <code class="docutils literal"><span class="pre">OpKernel</span></code> is not shared, see <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43"><code class="docutils literal"><span class="pre">OnehotCrossEntropyOpKernel</span></code></a>.</p>
<p>To make writing the computation of an <code class="docutils literal"><span class="pre">OpKernel</span></code> simpler, and to let the CPU and CUDA code be reused, we usually implement the <code class="docutils literal"><span class="pre">Compute</span></code> interface with the Eigen unsupported Tensor module. For how to use the Eigen library inside PaddlePaddle, please refer to the <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md">usage document</a>.</p>
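<p>As a concrete illustration of this Eigen-based style, the following is a minimal sketch only (the kernel name <code class="docutils literal"><span class="pre">MyScaleKernel</span></code> is hypothetical, and it assumes the <code class="docutils literal"><span class="pre">framework::EigenVector</span></code> helper and the <code class="docutils literal"><span class="pre">eigen_device()</span></code> accessor provided elsewhere in the code base; it is not the official implementation of any existing op):</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// A minimal, hedged sketch: a hypothetical scale-like kernel written with Eigen
// so that the same code runs on both CPU and CUDA devices.
using framework::Tensor;  // assumed, as in the operator headers

template &lt;typename DeviceContext, typename T&gt;
class MyScaleKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext&amp; context) const override {
    auto* in = context.Input&lt;Tensor&gt;(&quot;X&quot;);
    auto* out = context.Output&lt;Tensor&gt;(&quot;Out&quot;);
    out-&gt;mutable_data&lt;T&gt;(context.GetPlace());
    // Read the attribute registered via AddAttr&lt;float&gt;(&quot;scale&quot;, ...).
    auto scale = static_cast&lt;T&gt;(context.Attr&lt;float&gt;(&quot;scale&quot;));
    auto x = framework::EigenVector&lt;T&gt;::Flatten(*in);
    auto y = framework::EigenVector&lt;T&gt;::Flatten(*out);
    auto&amp; place =
        *context.template device_context&lt;DeviceContext&gt;().eigen_device();
    y.device(place) = scale * x;  // the same expression works on CPU and CUDA
  }
};
</pre></div>
</div>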
<p>At this point, the forward Op is complete. Next, the op and its kernels need to be registered in the <code class="docutils literal"><span class="pre">.cc</span></code> file.
Defining the backward Op class and the backward OpKernel is similar to the forward Op and is not repeated here. <strong>Note, however, that a backward Op has no <code class="docutils literal"><span class="pre">ProtoMaker</span></code></strong>.</p>
</div>
<div class="section" id="operator">
<span id="id3"></span><h3>注册Operator<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
<ul>
<li><p class="first"><code class="docutils literal"><span class="pre">.cc</span></code>文件中注册前向、反向Op类,注册CPU Kernel。</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOp</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpMaker</span><span class="p">,</span> <span class="n">mul_grad</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulOpGrad</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUDeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_CPU_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
              <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CPUDeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
<p>In the code above:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">REGISTER_OP</span></code>: registers the class <code class="docutils literal"><span class="pre">ops::MulOp</span></code> under the type name <code class="docutils literal"><span class="pre">mul</span></code>, with <code class="docutils literal"><span class="pre">ops::MulOpMaker</span></code> as its <code class="docutils literal"><span class="pre">ProtoMaker</span></code>; it also registers <code class="docutils literal"><span class="pre">ops::MulOpGrad</span></code> under the type name <code class="docutils literal"><span class="pre">mul_grad</span></code>.</li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_WITHOUT_GRADIENT</span></code>: used to register an Op that has no backward Op (see the sketch after this list).</li>
<li><code class="docutils literal"><span class="pre">REGISTER_OP_CPU_KERNEL</span></code>: registers the class <code class="docutils literal"><span class="pre">ops::MulKernel</span></code>, specializing its template parameters to <code class="docutils literal"><span class="pre">paddle::platform::CPUDeviceContext</span></code> and the <code class="docutils literal"><span class="pre">float</span></code> type; <code class="docutils literal"><span class="pre">ops::MulGradKernel</span></code> is registered in the same way.</li>
</ul>
</ul>
</li>
</ul>
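<p>For reference, registering an Op that has no backward Op might look like the following sketch (<code class="docutils literal"><span class="pre">my_print</span></code> and its classes are hypothetical names used only for illustration):</p>
<div class="highlight-cpp"><div class="highlight"><pre><span></span>// Hedged sketch: an op registered without a gradient (names are hypothetical).
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(my_print, ops::MyPrintOp, ops::MyPrintOpMaker);
REGISTER_OP_CPU_KERNEL(
    my_print, ops::MyPrintKernel&lt;paddle::platform::CPUDeviceContext, float&gt;);
</pre></div>
</div>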
<ul>
<li><p class="first">Register the CUDA kernel in the <code class="docutils literal"><span class="pre">.cu</span></code> file.</p>
<ul class="simple">
<li>Note that if the CUDA kernel is implemented with the Eigen unsupported module, the macro definition <code class="docutils literal"><span class="pre">#define</span> <span class="pre">EIGEN_USE_GPU</span></code> must appear at the beginning of the <code class="docutils literal"><span class="pre">.cu</span></code> file, for example:</li>
</ul>
<div class="highlight-cpp"><div class="highlight"><pre><span></span><span class="c1">// if use Eigen unsupported module before include head files</span>
<span class="cp">#define EIGEN_USE_GPU</span>

<span class="k">namespace</span> <span class="n">ops</span> <span class="o">=</span> <span class="n">paddle</span><span class="o">::</span><span class="n">operators</span><span class="p">;</span>
<span class="n">REGISTER_OP_CUDA_KERNEL</span><span class="p">(</span><span class="n">mul</span><span class="p">,</span> <span class="n">ops</span><span class="o">::</span><span class="n">MulKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CUDADeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
<span class="n">REGISTER_OP_CUDA_KERNEL</span><span class="p">(</span><span class="n">mul_grad</span><span class="p">,</span>
                       <span class="n">ops</span><span class="o">::</span><span class="n">MulGradKernel</span><span class="o">&lt;</span><span class="n">paddle</span><span class="o">::</span><span class="n">platform</span><span class="o">::</span><span class="n">CUDADeviceContext</span><span class="p">,</span> <span class="kt">float</span><span class="o">&gt;</span><span class="p">);</span>
</pre></div>
</div>
</li>
</ul>
</div>
<div class="section" id="">
<span id="id4"></span><h3>Compiling<a class="headerlink" href="#" title="Permalink to this headline"></a></h3>
<p>Run the following command to compile the op:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">make</span> <span class="n">mul_op</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="python">
<span id="python"></span><h2>绑定Python<a class="headerlink" href="#python" title="永久链接至标题"></a></h2>
448
<p>系统会对新增的op自动绑定Python,并链接到生成的lib库中。</p>
449 450
</div>
<div class="section" id="">
<span id="id5"></span><h2>Implementing Unit Tests<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<p>The unit tests cover comparing the forward Op's implementations on different devices (CPU, CUDA), comparing the backward Op's implementations on different devices (CPU, CUDA), and checking the gradients computed by the backward Op. Below we walk through <a class="reference external" href="https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py">the unit tests of <code class="docutils literal"><span class="pre">MulOp</span></code></a>.</p>
<div class="section" id="operator">
<span id="id6"></span><h3>前向Operator单测<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
455
<p>Op单元测试继承自<code class="docutils literal"><span class="pre">OpTest</span></code>。各项更加具体的单元测试在<code class="docutils literal"><span class="pre">TestMulOp</span></code>里完成。测试Operator,需要:</p>
456 457 458 459
<ol class="simple">
<li><code class="docutils literal"><span class="pre">setUp</span></code>函数定义输入、输出,以及相关的属性参数。</li>
<li>生成随机的输入数据。</li>
<li>在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。</li>
460
<li>反向计算已经自动集成进测试框架,直接调用相应接口即可。</li>
461
</ol>
462 463
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">unittest</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">op_test</span> <span class="kn">import</span> <span class="n">OpTest</span>


<span class="k">class</span> <span class="nc">TestMulOp</span><span class="p">(</span><span class="n">OpTest</span><span class="p">):</span>
    <span class="k">def</span> <span class="nf">setUp</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">op_type</span> <span class="o">=</span> <span class="s2">&quot;mul&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span> <span class="o">=</span> <span class="p">{</span>
            <span class="s1">&#39;X&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">32</span><span class="p">,</span> <span class="mi">84</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">),</span>
            <span class="s1">&#39;Y&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">random</span><span class="p">((</span><span class="mi">84</span><span class="p">,</span> <span class="mi">100</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>
        <span class="p">}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">outputs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;Out&#39;</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;X&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;Y&#39;</span><span class="p">])}</span>

    <span class="k">def</span> <span class="nf">test_check_output</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_output</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">test_check_grad_normal</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">([</span><span class="s1">&#39;X&#39;</span><span class="p">,</span> <span class="s1">&#39;Y&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">test_check_grad_ingore_x</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="p">[</span><span class="s1">&#39;Y&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span> <span class="n">no_grad_set</span><span class="o">=</span><span class="nb">set</span><span class="p">(</span><span class="s2">&quot;X&quot;</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">test_check_grad_ingore_y</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">check_grad</span><span class="p">(</span>
            <span class="p">[</span><span class="s1">&#39;X&#39;</span><span class="p">],</span> <span class="s1">&#39;Out&#39;</span><span class="p">,</span> <span class="n">max_relative_error</span><span class="o">=</span><span class="mf">0.5</span><span class="p">,</span> <span class="n">no_grad_set</span><span class="o">=</span><span class="nb">set</span><span class="p">(</span><span class="s1">&#39;Y&#39;</span><span class="p">))</span>
</pre></div>
</div>
<p>The code above first imports the required packages. The important variables set in the <code class="docutils literal"><span class="pre">setUp</span></code> function are explained below:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">self.op_type</span> <span class="pre">=</span> <span class="pre">&quot;mul&quot;</span></code>: defines the type, which must match the type name used when the operator was registered.</li>
<li><code class="docutils literal"><span class="pre">self.inputs</span></code>: defines the inputs as <code class="docutils literal"><span class="pre">numpy.array</span></code> values and initializes them.</li>
<li><code class="docutils literal"><span class="pre">self.outputs</span></code>: defines the outputs; the same computation as the operator is carried out in the Python script and the Python-side result is returned.</li>
</ul>
</div>
<div class="section" id="operator">
<span id="id7"></span><h3>反向operator单测<a class="headerlink" href="#operator" title="永久链接至标题"></a></h3>
500
<p>而反向测试中:</p>
501
<ul class="simple">
502 503 504 505
<li><code class="docutils literal"><span class="pre">test_check_grad_normal</span></code>中调用<code class="docutils literal"><span class="pre">check_grad</span></code>使用数值法检测梯度正确性和稳定性。<ul>
<li>第一个参数<code class="docutils literal"><span class="pre">[&quot;X&quot;,</span> <span class="pre">&quot;Y&quot;]</span></code> : 指定对输入变量<code class="docutils literal"><span class="pre">X</span></code><code class="docutils literal"><span class="pre">Y</span></code>做梯度检测。</li>
<li>第二个参数<code class="docutils literal"><span class="pre">&quot;Out&quot;</span></code> : 指定前向网络最终的输出目标变量<code class="docutils literal"><span class="pre">Out</span></code></li>
<li>第三个参数<code class="docutils literal"><span class="pre">max_relative_error</span></code>:指定检测梯度时能容忍的最大错误值。</li>
506 507
</ul>
</li>
508
<li><code class="docutils literal"><span class="pre">test_check_grad_ingore_x</span></code><code class="docutils literal"><span class="pre">test_check_grad_ingore_y</span></code>分支用来测试只需要计算一个输入梯度的情况。</li>
509
</ul>
510
</div>
<div class="section" id="">
<span id="id8"></span><h3>Compiling and Running<a class="headerlink" href="#" title="Permalink to this headline"></a></h3>
<p>Any new <code class="docutils literal"><span class="pre">test_*.py</span></code> unit test added under the <code class="docutils literal"><span class="pre">python/paddle/v2/framework/tests</span></code> directory is automatically added to the project and compiled.</p>
<p>Note that, unlike compiling and testing a single Op, <strong>running the unit tests requires building the whole project</strong> with <code class="docutils literal"><span class="pre">WITH_TESTING</span></code> turned on, i.e. <code class="docutils literal"><span class="pre">cmake</span> <span class="pre">paddle_dir</span> <span class="pre">-DWITH_TESTING=ON</span></code>. After the build succeeds, run the unit tests with the following command:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>make <span class="nb">test</span> <span class="nv">ARGS</span><span class="o">=</span><span class="s2">&quot;-R test_mul_op -V&quot;</span>
</pre></div>
</div>
<p>Or:</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>ctest -R test_mul_op
</pre></div>
</div>
</div>
</div>
<div class="section" id="">
<span id="id9"></span><h2>Remarks<a class="headerlink" href="#" title="Permalink to this headline"></a></h2>
<ul class="simple">
<li>Create separate <code class="docutils literal"><span class="pre">*_op.h</span></code> (if any), <code class="docutils literal"><span class="pre">*_op.cc</span></code>, and <code class="docutils literal"><span class="pre">*_op.cu</span></code> (if any) files for every Op. Putting multiple Ops in one file is not allowed and will cause compilation errors.</li>
<li>The type name used when registering an Op must be identical to the Op's name. That is, registering <code class="docutils literal"><span class="pre">REGISTER_OP(B,</span> <span class="pre">...)</span></code> inside <code class="docutils literal"><span class="pre">A_op.cc</span></code> is not allowed and will make the unit tests fail.</li>
<li>If an Op has no CUDA kernel, do not create an empty <code class="docutils literal"><span class="pre">*_op.cu</span></code> file; this will make the unit tests fail.</li>
<li>If several Ops depend on some shared functions, these can be placed in files that do not follow the <code class="docutils literal"><span class="pre">*_op.*</span></code> pattern, such as <code class="docutils literal"><span class="pre">gather.h</span></code>.</li>
</ul>
</div>
</div>


           </div>
          </div>
          <footer>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016, PaddlePaddle developers.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'../',
            VERSION:'',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true,
            SOURCELINK_SUFFIX: ".txt",
        };
    </script>
      <script type="text/javascript" src="../_static/jquery.js"></script>
      <script type="text/javascript" src="../_static/underscore.js"></script>
      <script type="text/javascript" src="../_static/doctools.js"></script>
      <script type="text/javascript" src="../_static/translations.js"></script>
      <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"></script>
       
  

  
  
    <script type="text/javascript" src="../_static/js/theme.js"></script>
  
  
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/js/perfect-scrollbar.jquery.min.js"></script>
  <script src="../_static/js/paddle_doc_init.js"></script> 

</body>
</html>