Commit a9086bf3 authored by Xin Pan

also move a few other dir to legacy/

Parent c2b3df65
@@ -178,7 +178,7 @@ include(inference_lib) # add paddle fluid inference libraries
 include_directories("${PADDLE_SOURCE_DIR}")
-include_directories("${PADDLE_SOURCE_DIR}/paddle/cuda/include")
+include_directories("${PADDLE_SOURCE_DIR}/paddle/legacy/cuda/include")
 include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto")
 include_directories("${CMAKE_CURRENT_BINARY_DIR}/go/pserver/client/c")
@@ -222,7 +222,7 @@ add_subdirectory(proto)
 if(NOT MOBILE_INFERENCE AND NOT WITH_FLUID_ONLY)
   # "add_subdirectory(go)" should be placed after the following line,
   # because it depends on paddle/optimizer.
-  add_subdirectory(paddle/optimizer)
+  add_subdirectory(paddle/legacy/optimizer)
 endif()
 # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be
......
@@ -159,4 +159,4 @@ This will enable VLOG messages generated by `buddy_allocator.{h,cc}` and in the
 - verbose level 1: [framework](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework)
 - verbose level 3: [operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)
 - verbose level 5: [memory](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory), [platform](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/platform)
-- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/math)
+- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/math)
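The hunk above records the per-directory VLOG level convention. As a hedged illustration of how such levels are consumed (standard glog `VLOG` semantics and `GLOG_v` environment control are assumptions here, not something this commit shows):

```cpp
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  (void)argc;
  google::InitGoogleLogging(argv[0]);
  // Per the convention above: framework messages log at level 1 and
  // operator messages at level 3. A message is emitted only when the
  // process verbosity is at least its level, e.g. GLOG_v=3 ./a.out
  // (environment-variable control is assumed from glog's behavior).
  VLOG(1) << "framework-level detail";
  VLOG(3) << "operator-level detail";
  return 0;
}
```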
@@ -65,7 +65,7 @@ paddle_error paddle_matrix_get_shape(paddle_matrix matrix,
 The C interface is then implemented in C++, in the file `paddle_matrix.cpp`:
 ```cpp
-#include "paddle/math/matrix.h"
+#include "paddle/legacy/math/matrix.h"
 extern "C"
 paddle_error paddle_matrix_shape(paddle_matrix matrix,
                                  uint64_t *width,
......
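The hunk stops mid-signature, so here is a minimal sketch of how such a wrapper typically finishes. The C header name, the handle-to-`paddle::Matrix*` cast, and the `kPD_*` error codes are assumptions for illustration, not the actual implementation:

```cpp
#include "paddle_matrix.h"  // assumed C header declaring paddle_matrix/paddle_error
#include "paddle/legacy/math/matrix.h"

extern "C" paddle_error paddle_matrix_shape(paddle_matrix matrix,
                                            uint64_t* width,
                                            uint64_t* height) {
  if (matrix == nullptr || width == nullptr || height == nullptr) {
    return kPD_NULLPTR;  // assumed error code for a null argument
  }
  // Assumption: the opaque C handle is a type-erased paddle::Matrix*.
  auto* mat = reinterpret_cast<paddle::Matrix*>(matrix);
  *width = mat->getWidth();
  *height = mat->getHeight();
  return kPD_NO_ERROR;  // assumed success code
}
```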
@@ -153,7 +153,7 @@ PaddlePaddle's base Layer class can compute the derivatives above automatically.
 - Every layer must call :code:`Layer::forward(passType);` at the beginning of its :code:`forward` function.
 - Then allocate memory for the output with :code:`reserveOutput(batchSize, size);`. This step is necessary because we support training data with varying batch sizes, and :code:`reserveOutput` resizes the output accordingly. For efficiency, new memory is allocated when the matrix has to grow, while the existing memory block is kept when it shrinks.
-- Then use matrix operations to compute :math:`\sum_i W_i x + b`. :code:`getInput(i).value` returns the i-th input matrix. Each input is a :math:`batchSize \times dim` matrix, where each row represents a single input in a batch. For all supported matrix operations, please refer to :code:`paddle/math/Matrix.h` and :code:`paddle/math/BaseMatrix.h`.
+- Then use matrix operations to compute :math:`\sum_i W_i x + b`. :code:`getInput(i).value` returns the i-th input matrix. Each input is a :math:`batchSize \times dim` matrix, where each row represents a single input in a batch. For all supported matrix operations, please refer to :code:`paddle/legacy/math/Matrix.h` and :code:`paddle/legacy/math/BaseMatrix.h`.
 - Finally, apply the activation with :code:`forwardActivation();`. This automatically performs the activation declared in the network configuration.
......
@@ -154,7 +154,7 @@ The implementation of the forward part has the following steps.
 - Every layer must call :code:`Layer::forward(passType);` at the beginning of its :code:`forward` function.
 - Then it allocates memory for the output using :code:`reserveOutput(batchSize, size);`. This step is necessary because we support batches of different sizes. :code:`reserveOutput` will change the size of the output accordingly. For the sake of efficiency, we allocate new memory if the matrix needs to expand, but reuse the existing memory block if it shrinks.
-- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. :code:`getInput(i).value` retrieves the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents a single input in a batch. For a complete list of supported matrix operations, please refer to :code:`paddle/math/Matrix.h` and :code:`paddle/math/BaseMatrix.h`.
+- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. :code:`getInput(i).value` retrieves the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents a single input in a batch. For a complete list of supported matrix operations, please refer to :code:`paddle/legacy/math/Matrix.h` and :code:`paddle/legacy/math/BaseMatrix.h`.
 - Finally it applies the activation function using :code:`forwardActivation();`. This automatically applies the activation function specified in the network configuration (a sketch of all four steps follows below).
......
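Taken together, the four steps above read like this sketch of a hypothetical fully connected layer. The member names (`weights_`, `biases_`) and the exact `mul`/`addBias` signatures are assumptions for illustration:

```cpp
void FullyConnectedLayer::forward(PassType passType) {
  Layer::forward(passType);             // step 1: mandatory base call
  size_t batchSize = getInput(0).getBatchSize();
  reserveOutput(batchSize, getSize());  // step 2: size the output

  MatrixPtr outV = getOutputValue();
  for (size_t i = 0; i != inputLayers_.size(); ++i) {
    MatrixPtr input = getInput(i).value;  // a batchSize x dim matrix
    // step 3: accumulate W_i * x into the output; assumed semantics:
    // out = scaleAB * (input * W_i) + scaleT * out.
    outV->mul(*input, *weights_[i]->getW(), 1.0, i == 0 ? 0.0 : 1.0);
  }
  if (biases_) {
    outV->addBias(*(biases_->getW()), 1.0);  // add the bias term b
  }
  forwardActivation();  // step 4: activation from the network config
}
```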
@@ -50,12 +50,12 @@ GPUs, however, also need high parallelism to deliver their full capability; this is exactly why they are…
 **nvprof** is the NVIDIA profiler, while **nvvp** is the NVIDIA visual profiler with a GUI.
 In this tutorial, we mainly introduce nvprof and nvvp.
-:code:`test_GpuProfiler` from the :code:`paddle/math/tests` directory will be used to evaluate
+:code:`test_GpuProfiler` from the :code:`paddle/legacy/math/tests` directory will be used to evaluate
 the above profilers.
-The :code:`test_GpuProfiler` in the :code:`paddle/math/test` directory demonstrates the usage of the profilers above.
+The :code:`test_GpuProfiler` in the :code:`paddle/legacy/math/test` directory demonstrates the usage of the profilers above.
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :linenos:
@@ -83,7 +83,7 @@ program crashes when CPU version of PaddlePaddle invokes them.
 1. Add the :code:`REGISTER_TIMER_INFO` and :code:`printAllStatus` functions (see the highlighted lines).
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :emphasize-lines: 8-12,14
@@ -101,8 +101,8 @@ program crashes when CPU version of PaddlePaddle invokes them.
 .. code-block:: bash
    :emphasize-lines: 1,12-15
-   > ./paddle/math/tests/test_GpuProfiler
-   I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler
+   > ./paddle/legacy/math/tests/test_GpuProfiler
+   I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/legacy/math/tests/test_GpuProfiler
    I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions
    I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done.
    [==========] Running 1 test from 1 test case.
@@ -130,7 +130,7 @@ The nvprof tool
 1. Add the :code:`REGISTER_GPU_PROFILER` function to the code (see the highlighted lines).
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :emphasize-lines: 6-7
@@ -147,13 +147,13 @@ The nvprof tool
 .. code-block:: bash
-   nvprof ./paddle/math/tests/test_GpuProfiler
+   nvprof ./paddle/legacy/math/tests/test_GpuProfiler
 Then you will get the following profiling result:
 .. code-block:: bash
-   ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler
+   ==78544== Profiling application: ./paddle/legacy/math/tests/test_GpuProfiler
    ==78544== Profiling result:
    Time(%)      Time     Calls       Avg       Min       Max  Name
    27.60%  9.6305ms         5  1.9261ms  3.4560us  6.4035ms  [CUDA memcpy HtoD]
......
@@ -51,10 +51,10 @@ For general GPU profiling, a bunch of tools are provided from both NVIDIA and th
 **nvprof** is the Nvidia profiler and **nvvp** is the (GUI-based) Nvidia visual profiler.
 In this tutorial, we will focus on nvprof and nvvp.
-:code:`test_GpuProfiler` from the :code:`paddle/math/tests` directory will be used to evaluate
+:code:`test_GpuProfiler` from the :code:`paddle/legacy/math/tests` directory will be used to evaluate
 the above profilers.
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :linenos:
@@ -80,7 +80,7 @@ As a simple example, consider the following:
 1. Add the :code:`REGISTER_TIMER_INFO` and :code:`printAllStatus` functions (see the emphasized lines).
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :emphasize-lines: 8-12,14
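The fifteen included lines of `test_GpuProfiler.cpp` are not reproduced on this page; a hedged sketch of the pattern the emphasized lines presumably follow (the label strings and the profiled call are assumptions):

```cpp
{
  // Time everything in this scope under a named statistics entry;
  // the two strings (timer name, free-form info) follow the macro's
  // assumed (name, info) signature.
  REGISTER_TIMER_INFO("testBilinearFwdBwd", "numSamples = 10");
  testBilinearFwdBwd(numSamples, imgSize, imgSize, channels);  // hypothetical call
}
globalStat.printAllStatus();  // dump the timer statistics collected so far
```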
@@ -98,8 +98,8 @@ As a simple example, consider the following:
 .. code-block:: bash
    :emphasize-lines: 1,12-15
-   > ./paddle/math/tests/test_GpuProfiler
-   I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler
+   > ./paddle/legacy/math/tests/test_GpuProfiler
+   I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/legacy/math/tests/test_GpuProfiler
    I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions
    I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done.
    [==========] Running 1 test from 1 test case.
@@ -127,7 +127,7 @@ To use this command line profiler **nvprof**, you can simply issue the following
 1. Add the :code:`REGISTER_GPU_PROFILER` function (see the emphasized lines).
-.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
    :language: c++
    :lines: 137-151
    :emphasize-lines: 6-7
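Again, the included source is not shown here. The two emphasized lines presumably bracket the region that nvprof should capture, along the lines of this sketch (the macro's signature and behavior are assumptions):

```cpp
// Assumed to start/stop the CUDA profiler around the code below, so that
// nvprof records only this region rather than the whole process.
REGISTER_GPU_PROFILER("testBilinearFwdBwd", "numSamples = 10");
testBilinearFwdBwd(numSamples, imgSize, imgSize, channels);  // hypothetical call
```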
@@ -144,13 +144,13 @@ To use this command line profiler **nvprof**, you can simply issue the following
 .. code-block:: bash
-   nvprof ./paddle/math/tests/test_GpuProfiler
+   nvprof ./paddle/legacy/math/tests/test_GpuProfiler
 Then, you can get the following profiling result:
 .. code-block:: bash
-   ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler
+   ==78544== Profiling application: ./paddle/legacy/math/tests/test_GpuProfiler
    ==78544== Profiling result:
    Time(%)      Time     Calls       Avg       Min       Max  Name
    27.60%  9.6305ms         5  1.9261ms  3.4560us  6.4035ms  [CUDA memcpy HtoD]
......
@@ -16,7 +16,7 @@ package pserver
 // #cgo CFLAGS: -I ../../
 // #cgo LDFLAGS: ${SRCDIR}/client/c/libpaddle_go_optimizer.a -lstdc++ -lm
-// #include "paddle/optimizer/optimizer.h"
+// #include "paddle/legacy/optimizer/optimizer.h"
 // #include <stdlib.h>
 // #include <string.h>
 import "C"
......
 if(NOT WITH_FLUID_ONLY)
-  add_subdirectory(cuda)
-  add_subdirectory(function)
+  add_subdirectory(legacy/cuda)
+  add_subdirectory(legacy/function)
   add_subdirectory(utils)
-  add_subdirectory(math)
+  add_subdirectory(legacy/math)
   add_subdirectory(legacy/gserver)
-  add_subdirectory(parameter)
+  add_subdirectory(legacy/parameter)
   if(MOBILE_INFERENCE)
     add_subdirectory(capi)
   else()
-    add_subdirectory(pserver)
+    add_subdirectory(legacy/pserver)
     add_subdirectory(trainer)
     add_subdirectory(scripts)
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "PaddleAPI.h"
 #include "PaddleAPIPrivate.h"
-#include "paddle/parameter/Argument.h"
+#include "paddle/legacy/parameter/Argument.h"
 size_t Arguments::getSlotNum() const { return m->outputs.size(); }
......
@@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"
 #include <cstring>
 #include <iostream>
 #include "PaddleAPI.h"
-#include "paddle/math/CpuSparseMatrix.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/CpuSparseMatrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
 struct MatrixPrivate {
   std::shared_ptr<paddle::Matrix> mat;
......
@@ -16,7 +16,7 @@ limitations under the License. */
 #include "PaddleAPI.h"
 #include "paddle/legacy/gserver/evaluators/Evaluator.h"
 #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
-#include "paddle/parameter/ParameterUpdaterBase.h"
+#include "paddle/legacy/parameter/ParameterUpdaterBase.h"
 #include "paddle/trainer/TrainerConfigHelper.h"
 struct GradientMachinePrivate {
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/parameter/Parameter.h"
+#include "paddle/legacy/parameter/Parameter.h"
 #include "PaddleAPI.h"
 #include "PaddleAPIPrivate.h"
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/parameter/ParameterOptimizer.h"
+#include "paddle/legacy/parameter/ParameterOptimizer.h"
 #include <algorithm>
 #include "Internal.h"
 #include "PaddleAPI.h"
......
@@ -18,7 +18,7 @@ limitations under the License. */
 #include <vector>
 #include "PaddleAPI.h"
 #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
-#include "paddle/parameter/Argument.h"
+#include "paddle/legacy/parameter/Argument.h"
 #include "paddle/utils/Flags.h"
 // used to represent partial sequence
......
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "PaddleAPI.h"
-#include "paddle/parameter/Parameter.h"
+#include "paddle/legacy/parameter/Parameter.h"
 #include "paddle/utils/Common.h"
 #include "paddle/utils/Flags.h"
 #include "paddle/utils/PythonUtil.h"
......
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "PaddleAPI.h"
-#include "paddle/math/Vector.h"
+#include "paddle/legacy/math/Vector.h"
 #include <cstring>
......
@@ -14,9 +14,9 @@ limitations under the License. */
 #include "capi.h"
 #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/math/Vector.h"
-#include "paddle/parameter/Argument.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/Vector.h"
+#include "paddle/legacy/parameter/Argument.h"
 #pragma once
 namespace paddle {
......
@@ -51,7 +51,7 @@ It can be used as a helper class that draws the modified graph after each pass.
 ## Utilities
-There is some helper function/class for analysis.
+There is some helper legacy/function/class for analysis.
 - [dot.h](./dot.h) gives an easy-to-use interface for generating `DOT` code,
 - [graph_traits.h](./graph_traits.h) contains the graph traversal algorithms; it uses `iterator` to make the algorithms easy to share across different passes.
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <immintrin.h>
 #include "paddle/fluid/operators/math/detail/activation_functions.h"
 // TODO(qingqing) refine this dependence
-#include "paddle/cuda/src/avx_mathfun.h"
+#include "paddle/legacy/cuda/src/avx_mathfun.h"
 namespace paddle {
 namespace operators {
......
@@ -207,7 +207,7 @@ typedef struct {
 #ifdef __NVCC__
 #include <cuda_runtime.h>
-#include "paddle/cuda/include/hl_cuda.h"
+#include "paddle/legacy/cuda/include/hl_cuda.h"
 #include "paddle/utils/Logging.h"
 extern __thread bool g_sync_flag;
......
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/cuda/include/hl_base.h"
-#include "paddle/cuda/include/hl_sparse.ph"
-#include "paddle/cuda/include/hl_top_k.h"
+#include "paddle/legacy/cuda/include/hl_base.h"
+#include "paddle/legacy/cuda/include/hl_sparse.ph"
+#include "paddle/legacy/cuda/include/hl_top_k.h"
 #include "paddle/utils/Logging.h"
 // using namespace hppl;
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #include <glog/logging.h>
 #include "BufferArg.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
 namespace paddle {
......
Seven more file diffs are collapsed and not shown.